diff --git a/pkg/authorization/authorizer/scope/converter.go b/pkg/authorization/authorizer/scope/converter.go index e1c2cf5fd875..5dbbbbb9059c 100644 --- a/pkg/authorization/authorizer/scope/converter.go +++ b/pkg/authorization/authorizer/scope/converter.go @@ -12,6 +12,7 @@ import ( "k8s.io/kubernetes/pkg/util/sets" authorizationapi "github.com/openshift/origin/pkg/authorization/api" + "github.com/openshift/origin/pkg/authorization/authorizer" "github.com/openshift/origin/pkg/client" oauthapi "github.com/openshift/origin/pkg/oauth/api" projectapi "github.com/openshift/origin/pkg/project/api" @@ -48,6 +49,41 @@ func ScopesToRules(scopes []string, namespace string, clusterPolicyGetter client return rules, kutilerrors.NewAggregate(errors) } +// ScopesToVisibleNamespaces returns a list of namespaces that the provided scopes have "get" access to. +// This exists only to support efficient list/watch of projects (ACLed namespaces) +func ScopesToVisibleNamespaces(scopes []string, clusterPolicyGetter client.ClusterPolicyLister) (sets.String, error) { + if len(scopes) == 0 { + return sets.NewString("*"), nil + } + + visibleNamespaces := sets.String{} + + errors := []error{} + for _, scope := range scopes { + found := false + + for _, evaluator := range ScopeEvaluators { + if evaluator.Handles(scope) { + found = true + allowedNamespaces, err := evaluator.ResolveGettableNamespaces(scope, clusterPolicyGetter) + if err != nil { + errors = append(errors, err) + continue + } + + visibleNamespaces.Insert(allowedNamespaces...) 
+ break + } + } + + if !found { + errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope)) + } + } + + return visibleNamespaces, kutilerrors.NewAggregate(errors) +} + const ( UserIndicator = "user:" ClusterRoleIndicator = "role:" @@ -61,6 +97,7 @@ type ScopeEvaluator interface { Describe(scope string) string Validate(scope string) error ResolveRules(scope, namespace string, clusterPolicyGetter client.ClusterPolicyLister) ([]authorizationapi.PolicyRule, error) + ResolveGettableNamespaces(scope string, clusterPolicyGetter client.ClusterPolicyLister) ([]string, error) } // ScopeEvaluators map prefixes to a function that handles that prefix @@ -134,7 +171,7 @@ func (userEvaluator) ResolveRules(scope, namespace string, clusterPolicyGetter c }, nil case UserListProject: return []authorizationapi.PolicyRule{ - {Verbs: sets.NewString("list"), APIGroups: []string{projectapi.GroupName}, Resources: sets.NewString("projects")}, + {Verbs: sets.NewString("list", "watch"), APIGroups: []string{projectapi.GroupName}, Resources: sets.NewString("projects")}, }, nil case UserFull: return []authorizationapi.PolicyRule{ @@ -146,6 +183,15 @@ func (userEvaluator) ResolveRules(scope, namespace string, clusterPolicyGetter c } } +func (userEvaluator) ResolveGettableNamespaces(scope string, clusterPolicyGetter client.ClusterPolicyLister) ([]string, error) { + switch scope { + case UserFull: + return []string{"*"}, nil + default: + return []string{}, nil + } +} + // escalatingScopeResources are resources that are considered escalating for scope evaluation var escalatingScopeResources = []unversioned.GroupResource{ {Group: kapi.GroupName, Resource: "secrets"}, @@ -175,6 +221,12 @@ func (e clusterRoleEvaluator) parseScope(scope string) (string /*role name*/, st if !e.Handles(scope) { return "", "", false, fmt.Errorf("bad format for scope %v", scope) } + return ParseClusterRoleScope(scope) +} +func ParseClusterRoleScope(scope string) (string /*role name*/, string 
/*namespace*/, bool /*escalating*/, error) { + if !strings.HasPrefix(scope, ClusterRoleIndicator) { + return "", "", false, fmt.Errorf("bad format for scope %v", scope) + } escalating := false if strings.HasSuffix(scope, ":!") { escalating = true @@ -214,7 +266,7 @@ func (e clusterRoleEvaluator) Describe(scope string) string { } func (e clusterRoleEvaluator) ResolveRules(scope, namespace string, clusterPolicyGetter client.ClusterPolicyLister) ([]authorizationapi.PolicyRule, error) { - roleName, scopeNamespace, escalating, err := e.parseScope(scope) + _, scopeNamespace, _, err := e.parseScope(scope) if err != nil { return nil, err } @@ -224,6 +276,16 @@ func (e clusterRoleEvaluator) ResolveRules(scope, namespace string, clusterPolic return []authorizationapi.PolicyRule{}, nil } + return e.resolveRules(scope, clusterPolicyGetter) +} + +// resolveRules doesn't enforce namespace checks +func (e clusterRoleEvaluator) resolveRules(scope string, clusterPolicyGetter client.ClusterPolicyLister) ([]authorizationapi.PolicyRule, error) { + roleName, _, escalating, err := e.parseScope(scope) + if err != nil { + return nil, err + } + policy, err := clusterPolicyGetter.Get("default") if err != nil { return nil, err @@ -252,6 +314,37 @@ func (e clusterRoleEvaluator) ResolveRules(scope, namespace string, clusterPolic return rules, nil } +func (e clusterRoleEvaluator) ResolveGettableNamespaces(scope string, clusterPolicyGetter client.ClusterPolicyLister) ([]string, error) { + _, scopeNamespace, _, err := e.parseScope(scope) + if err != nil { + return nil, err + } + rules, err := e.resolveRules(scope, clusterPolicyGetter) + if err != nil { + return nil, err + } + + attributes := authorizer.DefaultAuthorizationAttributes{ + APIGroup: kapi.GroupName, + Verb: "get", + Resource: "namespaces", + } + + errors := []error{} + for _, rule := range rules { + matches, err := attributes.RuleMatches(rule) + if err != nil { + errors = append(errors, err) + continue + } + if matches { + return 
[]string{scopeNamespace}, nil + } + } + + return []string{}, kutilerrors.NewAggregate(errors) +} + // TODO: direct deep copy needing a cloner is something that should be fixed upstream var localCloner = conversion.NewCloner() diff --git a/pkg/project/auth/cache.go b/pkg/project/auth/cache.go index e8610417ffb9..1fbdaa88d944 100644 --- a/pkg/project/auth/cache.go +++ b/pkg/project/auth/cache.go @@ -17,6 +17,8 @@ import ( utilwait "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/watch" + authorizationapi "github.com/openshift/origin/pkg/authorization/api" + "github.com/openshift/origin/pkg/authorization/authorizer/scope" "github.com/openshift/origin/pkg/client" ) @@ -228,6 +230,10 @@ func (ac *AuthorizationCache) RemoveWatcher(watcher CacheWatcher) { } } +func (ac *AuthorizationCache) GetClusterPolicyLister() client.SyncedClusterPoliciesListerInterface { + return ac.clusterPolicyLister +} + // synchronizeNamespaces synchronizes access over each namespace and returns a set of namespace names that were looked at in last sync func (ac *AuthorizationCache) synchronizeNamespaces(userSubjectRecordStore cache.Store, groupSubjectRecordStore cache.Store, reviewRecordStore cache.Store) sets.String { namespaceSet := sets.NewString() @@ -430,14 +436,22 @@ func (ac *AuthorizationCache) List(userInfo user.Info) (*kapi.NamespaceList, err } } + allowedNamespaces, err := scope.ScopesToVisibleNamespaces(userInfo.GetExtra()[authorizationapi.ScopesKey], ac.clusterPolicyLister.ClusterPolicies()) + if err != nil { + return nil, err + } + namespaceList := &kapi.NamespaceList{} for key := range keys { - namespace, exists, err := ac.namespaceStore.GetByKey(key) + namespaceObj, exists, err := ac.namespaceStore.GetByKey(key) if err != nil { return nil, err } if exists { - namespaceList.Items = append(namespaceList.Items, *namespace.(*kapi.Namespace)) + namespace := *namespaceObj.(*kapi.Namespace) + if allowedNamespaces.Has("*") || allowedNamespaces.Has(namespace.Name) { + 
namespaceList.Items = append(namespaceList.Items, namespace) + } } } return namespaceList, nil diff --git a/pkg/project/auth/watch.go b/pkg/project/auth/watch.go index 3c0ec8dd1c5b..5371c2a4440f 100644 --- a/pkg/project/auth/watch.go +++ b/pkg/project/auth/watch.go @@ -33,8 +33,9 @@ type WatchableCache interface { // userProjectWatcher converts a native etcd watch to a watch.Interface. type userProjectWatcher struct { - username string - groups []string + user user.Info + // visibleNamespaces are the namespaces that the scopes allow + visibleNamespaces sets.String // cacheIncoming is a buffered channel used for notification to watcher. If the buffer fills up, // then the watcher will be removed and the connection will be broken. @@ -69,9 +70,8 @@ var ( watchChannelHWM etcd.HighWaterMark ) -func NewUserProjectWatcher(username string, groups []string, projectCache *projectcache.ProjectCache, authCache WatchableCache, includeAllExistingProjects bool) *userProjectWatcher { - userInfo := &user.DefaultInfo{Name: username, Groups: groups} - namespaces, _ := authCache.List(userInfo) +func NewUserProjectWatcher(user user.Info, visibleNamespaces sets.String, projectCache *projectcache.ProjectCache, authCache WatchableCache, includeAllExistingProjects bool) *userProjectWatcher { + namespaces, _ := authCache.List(user) knownProjects := map[string]string{} for _, namespace := range namespaces.Items { knownProjects[namespace.Name] = namespace.ResourceVersion @@ -84,8 +84,8 @@ func NewUserProjectWatcher(username string, groups []string, projectCache *proje } w := &userProjectWatcher{ - username: username, - groups: groups, + user: user, + visibleNamespaces: visibleNamespaces, cacheIncoming: make(chan watch.Event, 1000), cacheError: make(chan error, 1), @@ -107,7 +107,12 @@ func NewUserProjectWatcher(username string, groups []string, projectCache *proje } func (w *userProjectWatcher) GroupMembershipChanged(namespaceName string, users, groups sets.String) { - hasAccess := 
users.Has(w.username) || groups.HasAny(w.groups...) + if !w.visibleNamespaces.Has("*") && !w.visibleNamespaces.Has(namespaceName) { + // this user is scoped to a level that shouldn't see this update + return + } + + hasAccess := users.Has(w.user.GetName()) || groups.HasAny(w.user.GetGroups()...) _, known := w.knownProjects[namespaceName] switch { diff --git a/pkg/project/auth/watch_test.go b/pkg/project/auth/watch_test.go index 50851c1998b1..92fdbcd74a31 100644 --- a/pkg/project/auth/watch_test.go +++ b/pkg/project/auth/watch_test.go @@ -16,7 +16,7 @@ import ( projectcache "github.com/openshift/origin/pkg/project/cache" ) -func newTestWatcher(user string, groups []string, namespaces ...*kapi.Namespace) (*userProjectWatcher, *fakeAuthCache) { +func newTestWatcher(username string, groups []string, namespaces ...*kapi.Namespace) (*userProjectWatcher, *fakeAuthCache) { objects := []runtime.Object{} for i := range namespaces { objects = append(objects, namespaces[i]) @@ -27,7 +27,7 @@ func newTestWatcher(user string, groups []string, namespaces ...*kapi.Namespace) projectCache.Run() fakeAuthCache := &fakeAuthCache{} - return NewUserProjectWatcher(user, groups, projectCache, fakeAuthCache, false), fakeAuthCache + return NewUserProjectWatcher(&user.DefaultInfo{Name: username, Groups: groups}, sets.NewString("*"), projectCache, fakeAuthCache, false), fakeAuthCache } type fakeAuthCache struct { diff --git a/pkg/project/registry/project/proxy/proxy.go b/pkg/project/registry/project/proxy/proxy.go index c144fd040570..b1bada975d43 100644 --- a/pkg/project/registry/project/proxy/proxy.go +++ b/pkg/project/registry/project/proxy/proxy.go @@ -15,6 +15,8 @@ import ( "k8s.io/kubernetes/pkg/watch" oapi "github.com/openshift/origin/pkg/api" + authorizationapi "github.com/openshift/origin/pkg/authorization/api" + "github.com/openshift/origin/pkg/authorization/authorizer/scope" "github.com/openshift/origin/pkg/project/api" projectapi "github.com/openshift/origin/pkg/project/api" 
projectauth "github.com/openshift/origin/pkg/project/auth" @@ -91,7 +93,12 @@ func (s *REST) Watch(ctx kapi.Context, options *kapi.ListOptions) (watch.Interfa includeAllExistingProjects := (options != nil) && options.ResourceVersion == "0" - watcher := projectauth.NewUserProjectWatcher(userInfo.GetName(), userInfo.GetGroups(), s.projectCache, s.authCache, includeAllExistingProjects) + allowedNamespaces, err := scope.ScopesToVisibleNamespaces(userInfo.GetExtra()[authorizationapi.ScopesKey], s.authCache.GetClusterPolicyLister().ClusterPolicies()) + if err != nil { + return nil, err + } + + watcher := projectauth.NewUserProjectWatcher(userInfo, allowedNamespaces, s.projectCache, s.authCache, includeAllExistingProjects) s.authCache.AddWatcher(watcher) go watcher.Watch() diff --git a/test/cmd/authentication.sh b/test/cmd/authentication.sh index 564ff7ab9943..339a77ae8af2 100755 --- a/test/cmd/authentication.sh +++ b/test/cmd/authentication.sh @@ -40,7 +40,9 @@ os::cmd::expect_success_and_text "oc get user/~ --token='${whoamitoken}'" "${use os::cmd::expect_failure_and_text "oc get pods --token='${whoamitoken}' -n '${project}'" "prevent this action; User \"scoped-user\" cannot list pods in project \"${project}\"" listprojecttoken="$(oc process -f "${OS_ROOT}/test/testdata/authentication/scoped-token-template.yaml" TOKEN_PREFIX=listproject SCOPE=user:list-projects USER_NAME="${username}" USER_UID="${useruid}" | oc create -f - -o name | awk -F/ '{print $2}')" -os::cmd::expect_success_and_text "oc get projects --token='${listprojecttoken}'" "${project}" +# this token doesn't have rights to see any projects even though it can hit the list endpoint, so an empty list is correct +# we'll add another scope that allows listing all known projects even if this token has no other powers in them. 
+os::cmd::expect_success_and_not_text "oc get projects --token='${listprojecttoken}'" "${project}" os::cmd::expect_failure_and_text "oc get user/~ --token='${listprojecttoken}'" 'prevent this action; User "scoped-user" cannot get users at the cluster scope' os::cmd::expect_failure_and_text "oc get pods --token='${listprojecttoken}' -n '${project}'" "prevent this action; User \"scoped-user\" cannot list pods in project \"${project}\"" diff --git a/test/integration/project_test.go b/test/integration/project_test.go index ad9f07e1dd20..8670b14ded0e 100644 --- a/test/integration/project_test.go +++ b/test/integration/project_test.go @@ -1,12 +1,15 @@ package integration import ( + "fmt" "testing" "time" kapi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" + "github.com/openshift/origin/pkg/authorization/authorizer/scope" buildapi "github.com/openshift/origin/pkg/build/api" policy "github.com/openshift/origin/pkg/cmd/admin/policy" "github.com/openshift/origin/pkg/cmd/server/bootstrappolicy" @@ -234,7 +237,6 @@ func TestProjectWatch(t *testing.T) { case <-time.After(3 * time.Second): } - } func waitForDelete(projectName string, w watch.Interface, t *testing.T) { @@ -267,3 +269,175 @@ func waitForAdd(projectName string, w watch.Interface, t *testing.T) { } } } +func waitForOnlyAdd(projectName string, w watch.Interface, t *testing.T) { + select { + case event := <-w.ResultChan(): + project := event.Object.(*projectapi.Project) + t.Logf("got %#v %#v", event, project) + if event.Type == watch.Added && project.Name == projectName { + return + } + t.Errorf("got unexpected project %v", project.Name) + + case <-time.After(30 * time.Second): + t.Fatalf("timeout: %v", projectName) + } +} +func waitForOnlyDelete(projectName string, w watch.Interface, t *testing.T) { + select { + case event := <-w.ResultChan(): + project := event.Object.(*projectapi.Project) + t.Logf("got %#v %#v", event, project) + if event.Type == watch.Deleted && 
project.Name == projectName { + return + } + t.Errorf("got unexpected project %v", project.Name) + + case <-time.After(30 * time.Second): + t.Fatalf("timeout: %v", projectName) + } +} + +func TestScopedProjectAccess(t *testing.T) { + testutil.RequireEtcd(t) + defer testutil.DumpEtcdOnFailure(t) + _, clusterAdminKubeConfig, err := testserver.StartTestMaster() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + fullBobClient, _, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "bob") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + oneTwoBobClient, _, _, err := testutil.GetScopedClientForUser(clusterAdminClient, *clusterAdminClientConfig, "bob", []string{ + scope.UserListProject, + scope.ClusterRoleIndicator + "view:one", + scope.ClusterRoleIndicator + "view:two", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + twoThreeBobClient, _, _, err := testutil.GetScopedClientForUser(clusterAdminClient, *clusterAdminClientConfig, "bob", []string{ + scope.UserListProject, + scope.ClusterRoleIndicator + "view:two", + scope.ClusterRoleIndicator + "view:three", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + allBobClient, _, _, err := testutil.GetScopedClientForUser(clusterAdminClient, *clusterAdminClientConfig, "bob", []string{ + scope.UserListProject, + scope.ClusterRoleIndicator + "view:*", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + oneTwoWatch, err := oneTwoBobClient.Projects().Watch(kapi.ListOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + twoThreeWatch, err := twoThreeBobClient.Projects().Watch(kapi.ListOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + allWatch, err := 
allBobClient.Projects().Watch(kapi.ListOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, "one", "bob"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + waitForOnlyAdd("one", allWatch, t) + waitForOnlyAdd("one", oneTwoWatch, t) + + if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, "two", "bob"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + waitForOnlyAdd("two", allWatch, t) + waitForOnlyAdd("two", oneTwoWatch, t) + waitForOnlyAdd("two", twoThreeWatch, t) + + if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, "three", "bob"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + waitForOnlyAdd("three", allWatch, t) + waitForOnlyAdd("three", twoThreeWatch, t) + + if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, "four", "bob"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + waitForOnlyAdd("four", allWatch, t) + + oneTwoProjects, err := oneTwoBobClient.Projects().List(kapi.ListOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := hasExactlyTheseProjects(oneTwoProjects, sets.NewString("one", "two")); err != nil { + t.Error(err) + } + twoThreeProjects, err := twoThreeBobClient.Projects().List(kapi.ListOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := hasExactlyTheseProjects(twoThreeProjects, sets.NewString("two", "three")); err != nil { + t.Error(err) + } + allProjects, err := allBobClient.Projects().List(kapi.ListOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := hasExactlyTheseProjects(allProjects, sets.NewString("one", "two", "three", "four")); err != nil { + t.Error(err) + } + + if err := fullBobClient.Projects().Delete("four"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + 
waitForOnlyDelete("four", allWatch, t) + + if err := fullBobClient.Projects().Delete("three"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + waitForOnlyDelete("three", allWatch, t) + waitForOnlyDelete("three", twoThreeWatch, t) + + if err := fullBobClient.Projects().Delete("two"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + waitForOnlyDelete("two", allWatch, t) + waitForOnlyDelete("two", oneTwoWatch, t) + waitForOnlyDelete("two", twoThreeWatch, t) + + if err := fullBobClient.Projects().Delete("one"); err != nil { + t.Fatalf("unexpected error: %v", err) + } + waitForOnlyDelete("one", allWatch, t) + waitForOnlyDelete("one", oneTwoWatch, t) +} + +func hasExactlyTheseProjects(list *projectapi.ProjectList, projects sets.String) error { + if len(list.Items) != len(projects) { + return fmt.Errorf("expected %v, got %v", projects, list.Items) + } + for _, project := range list.Items { + if !projects.Has(project.Name) { + return fmt.Errorf("expected %v, got %v", projects, list.Items) + } + } + return nil +} diff --git a/test/util/client.go b/test/util/client.go index bf734ed36012..ea9dab6a214d 100644 --- a/test/util/client.go +++ b/test/util/client.go @@ -1,6 +1,8 @@ package util import ( + "fmt" + "math/rand" "os" "path" "path/filepath" @@ -16,9 +18,11 @@ import ( "github.com/openshift/origin/pkg/client" configapi "github.com/openshift/origin/pkg/cmd/server/api" + "github.com/openshift/origin/pkg/cmd/server/origin" cmdutil "github.com/openshift/origin/pkg/cmd/util" "github.com/openshift/origin/pkg/cmd/util/clientcmd" "github.com/openshift/origin/pkg/cmd/util/tokencmd" + oauthapi "github.com/openshift/origin/pkg/oauth/api" "github.com/openshift/origin/pkg/serviceaccounts" ) @@ -81,6 +85,42 @@ func GetClientForUser(clientConfig restclient.Config, username string) (*client. 
return osClient, kubeClient, &userClientConfig, nil } +func GetScopedClientForUser(adminClient *client.Client, clientConfig restclient.Config, username string, scopes []string) (*client.Client, *kclient.Client, *restclient.Config, error) { + // make sure the user exists + if _, _, _, err := GetClientForUser(clientConfig, username); err != nil { + return nil, nil, nil, err + } + user, err := adminClient.Users().Get(username) + if err != nil { + return nil, nil, nil, err + } + + token := &oauthapi.OAuthAccessToken{ + ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("%s-token-plus-some-padding-here-to-make-the-limit-%d", username, rand.Int())}, + ClientName: origin.OpenShiftCLIClientID, + ExpiresIn: 86400, + Scopes: scopes, + RedirectURI: "https://127.0.0.1:12000/oauth/token/implicit", + UserName: user.Name, + UserUID: string(user.UID), + } + if _, err := adminClient.OAuthAccessTokens().Create(token); err != nil { + return nil, nil, nil, err + } + + scopedConfig := clientcmd.AnonymousClientConfig(&clientConfig) + scopedConfig.BearerToken = token.Name + kubeClient, err := kclient.New(&scopedConfig) + if err != nil { + return nil, nil, nil, err + } + osClient, err := client.New(&scopedConfig) + if err != nil { + return nil, nil, nil, err + } + return osClient, kubeClient, &scopedConfig, nil +} + func GetClientForServiceAccount(adminClient *kclient.Client, clientConfig restclient.Config, namespace, name string) (*client.Client, *kclient.Client, *restclient.Config, error) { _, err := adminClient.Namespaces().Create(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: namespace}}) if err != nil && !kerrs.IsAlreadyExists(err) {