use cluster role aggregation for admin, edit, and view
deads2k committed Jan 12, 2018
1 parent ff839e6 commit b81b982
Showing 7 changed files with 221 additions and 881 deletions.
106 changes: 30 additions & 76 deletions pkg/cmd/server/bootstrappolicy/policy.go
@@ -266,35 +266,12 @@ func GetOpenshiftBootstrapClusterRoles() []rbac.ClusterRole {
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: AdminRoleName,
Annotations: map[string]string{
oapi.OpenShiftDescription: "A user that has edit rights within the project and can change the project's membership.",
},
},
// a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users.
ObjectMeta: metav1.ObjectMeta{Name: "system:openshift:aggregate-to-admin", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}},
Rules: []rbac.PolicyRule{
rbac.NewRule(readWrite...).Groups(kapiGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
rbac.NewRule(readWrite...).Groups(kapiGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
"services", "services/proxy", "endpoints", "persistentvolumeclaims", "configmaps", "secrets").RuleOrDie(),
rbac.NewRule(read...).Groups(kapiGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
"namespaces", "pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),
rbac.NewRule("impersonate").Groups(kapiGroup).Resources("serviceaccounts").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(appsGroup, extensionsGroup).Resources("replicationcontrollers/scale",
"replicasets", "replicasets/scale", "deployments", "deployments/scale", "deployments/rollback").RuleOrDie(),
rbac.NewRule(read...).Groups(appsGroup, extensionsGroup).Resources("daemonsets").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(appsGroup).Resources("statefulsets", "deployments", "deployments/scale", "deployments/status").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(authzGroup, legacyAuthzGroup).Resources("roles", "rolebindings").RuleOrDie(),
rbac.NewRule(readWrite...).Groups(rbacGroup).Resources("roles", "rolebindings").RuleOrDie(),
rbac.NewRule("create").Groups(authzGroup, legacyAuthzGroup).Resources("localresourceaccessreviews", "localsubjectaccessreviews", "subjectrulesreviews").RuleOrDie(),
rbac.NewRule("create").Groups(securityGroup, legacySecurityGroup).Resources("podsecuritypolicysubjectreviews", "podsecuritypolicyselfsubjectreviews", "podsecuritypolicyreviews").RuleOrDie(),
rbac.NewRule("create").Groups(kAuthzGroup).Resources("localsubjectaccessreviews").RuleOrDie(),

rbac.NewRule(read...).Groups(authzGroup, legacyAuthzGroup).Resources("rolebindingrestrictions").RuleOrDie(),

@@ -328,7 +305,7 @@ func GetOpenshiftBootstrapClusterRoles() []rbac.ClusterRole {

rbac.NewRule(readWrite...).Groups(templateGroup, legacyTemplateGroup).Resources("templates", "templateconfigs", "processedtemplates", "templateinstances").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(extensionsGroup, networkingGroup).Resources("networkpolicies").RuleOrDie(),
rbac.NewRule(readWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(),

// backwards compatibility
rbac.NewRule(readWrite...).Groups(buildGroup, legacyBuildGroup).Resources("buildlogs").RuleOrDie(),
@@ -337,30 +314,11 @@ func GetOpenshiftBootstrapClusterRoles() []rbac.ClusterRole {
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: EditRoleName,
Annotations: map[string]string{
oapi.OpenShiftDescription: "A user that can create and edit most objects in a project, but can not update the project's membership.",
},
},
// a role for a namespace level editor. It grants access to all user level actions in a namespace.
// It does not grant powers for "privileged" resources which are domain of the system: `/status`
// subresources or `quota`/`limits` which are used to control namespaces
ObjectMeta: metav1.ObjectMeta{Name: "system:openshift:aggregate-to-edit", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}},
Rules: []rbac.PolicyRule{
rbac.NewRule(readWrite...).Groups(kapiGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
rbac.NewRule(readWrite...).Groups(kapiGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
"services", "services/proxy", "endpoints", "persistentvolumeclaims", "configmaps", "secrets").RuleOrDie(),
rbac.NewRule(read...).Groups(kapiGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
"namespaces", "pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),
rbac.NewRule("impersonate").Groups(kapiGroup).Resources("serviceaccounts").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(appsGroup, extensionsGroup).Resources("replicationcontrollers/scale",
"replicasets", "replicasets/scale", "deployments", "deployments/scale", "deployments/rollback").RuleOrDie(),
rbac.NewRule(read...).Groups(appsGroup, extensionsGroup).Resources("daemonsets").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(appsGroup).Resources("statefulsets", "deployments", "deployments/scale", "deployments/status").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(buildGroup, legacyBuildGroup).Resources("builds", "buildconfigs", "buildconfigs/webhooks").RuleOrDie(),
rbac.NewRule(read...).Groups(buildGroup, legacyBuildGroup).Resources("builds/log").RuleOrDie(),
rbac.NewRule("create").Groups(buildGroup, legacyBuildGroup).Resources("buildconfigs/instantiate", "buildconfigs/instantiatebinary", "builds/clone").RuleOrDie(),
@@ -389,36 +347,18 @@ func GetOpenshiftBootstrapClusterRoles() []rbac.ClusterRole {

rbac.NewRule(readWrite...).Groups(templateGroup, legacyTemplateGroup).Resources("templates", "templateconfigs", "processedtemplates", "templateinstances").RuleOrDie(),

rbac.NewRule(readWrite...).Groups(extensionsGroup, networkingGroup).Resources("networkpolicies").RuleOrDie(),
rbac.NewRule(readWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(),

// backwards compatibility
rbac.NewRule(readWrite...).Groups(buildGroup, legacyBuildGroup).Resources("buildlogs").RuleOrDie(),
rbac.NewRule(read...).Groups(kapiGroup).Resources("resourcequotausages").RuleOrDie(),
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: ViewRoleName,
Annotations: map[string]string{
oapi.OpenShiftDescription: "A user who can view but not edit any resources within the project. They can not view secrets or membership.",
},
},
// a role for namespace level viewing. It grants Read-only access to non-escalating resources in
// a namespace.
ObjectMeta: metav1.ObjectMeta{Name: "system:openshift:aggregate-to-view", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}},
Rules: []rbac.PolicyRule{
// TODO add "replicationcontrollers/scale" here
rbac.NewRule(read...).Groups(kapiGroup).Resources("pods", "replicationcontrollers", "serviceaccounts",
"services", "endpoints", "persistentvolumeclaims", "configmaps").RuleOrDie(),
rbac.NewRule(read...).Groups(kapiGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
"namespaces", "pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),

rbac.NewRule(read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),

rbac.NewRule(read...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),

rbac.NewRule(read...).Groups(appsGroup, extensionsGroup).Resources("deployments", "deployments/scale", "replicasets", "replicasets/scale").RuleOrDie(),
rbac.NewRule(read...).Groups(appsGroup, extensionsGroup).Resources("daemonsets").RuleOrDie(),

rbac.NewRule(read...).Groups(appsGroup).Resources("statefulsets", "deployments", "deployments/scale").RuleOrDie(),

rbac.NewRule(read...).Groups(buildGroup, legacyBuildGroup).Resources("builds", "buildconfigs", "buildconfigs/webhooks").RuleOrDie(),
rbac.NewRule(read...).Groups(buildGroup, legacyBuildGroup).Resources("builds/log").RuleOrDie(),
// access to jenkins
@@ -896,6 +836,25 @@ func GetBootstrapClusterRoles() []rbac.ClusterRole {
}
role.Annotations[roleSystemOnly] = roleIsSystemOnly
}

// add a couple selected descriptions
switch role.Name {
case "admin":
if role.Annotations == nil {
role.Annotations = map[string]string{}
}
role.Annotations[oapi.OpenShiftDescription] = "A user that has edit rights within the project and can change the project's membership."
case "edit":
if role.Annotations == nil {
role.Annotations = map[string]string{}
}
role.Annotations[oapi.OpenShiftDescription] = "A user that can create and edit most objects in a project, but can not update the project's membership."
case "view":
if role.Annotations == nil {
role.Annotations = map[string]string{}
}
role.Annotations[oapi.OpenShiftDescription] = "A user who can view but not edit any resources within the project. They can not view secrets or membership."
}
}

return finalClusterRoles
@@ -1045,11 +1004,6 @@ func GetBootstrapClusterRoleBindings() []rbac.ClusterRoleBinding {
// clusterRoleConflicts lists the roles which are known to conflict with upstream and which we have manually
// deconflicted with our own.
var clusterRoleConflicts = sets.NewString(
// these require special treatment to handle origin resources
"admin",
"edit",
"view",

// TODO this should probably be re-swizzled to be the delta on top of the kube role
"system:discovery",

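For context on the mechanism this commit leans on: instead of shipping fully specified admin/edit/view ClusterRoles (and listing them in clusterRoleConflicts, as the removed lines above show), origin now ships only system:openshift:aggregate-to-admin/edit/view pieces carrying the rbac.authorization.k8s.io/aggregate-to-* labels, and lets the upstream aggregation controller merge those rules into the stock admin/edit/view roles. The sketch below illustrates that merge using the public rbac/v1 types; the rule contents and the hand-rolled merge loop are illustrative only, not the controller's actual implementation.

// A minimal sketch of ClusterRole aggregation, assuming the k8s.io/api and
// k8s.io/apimachinery modules are available. The rules shown are placeholders.
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Upstream "admin" carries no OpenShift rules itself, only an aggregationRule
	// selecting the aggregate-to-admin label.
	admin := rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "admin"},
		AggregationRule: &rbacv1.AggregationRule{
			ClusterRoleSelectors: []metav1.LabelSelector{
				{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}},
			},
		},
	}

	// OpenShift's contribution: only origin-specific rules, plus the label that
	// marks it as a piece of "admin".
	openshiftPiece := rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "system:openshift:aggregate-to-admin",
			Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"},
		},
		Rules: []rbacv1.PolicyRule{{
			Verbs:     []string{"get", "list", "watch", "create", "update", "patch", "delete"},
			APIGroups: []string{"build.openshift.io"},     // placeholder group
			Resources: []string{"builds", "buildconfigs"}, // placeholder resources
		}},
	}

	// What the aggregation controller effectively does: union the rules of every
	// ClusterRole matching the selector into the aggregated role.
	for _, piece := range []rbacv1.ClusterRole{openshiftPiece} {
		if piece.Labels["rbac.authorization.k8s.io/aggregate-to-admin"] == "true" {
			admin.Rules = append(admin.Rules, piece.Rules...)
		}
	}
	fmt.Printf("admin aggregates %d rule(s)\n", len(admin.Rules))
}

Because the composite roles are produced by the controller at runtime, the bootstrap policy only needs to re-attach the OpenShift descriptions afterwards, which is what the switch added to GetBootstrapClusterRoles above does.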
30 changes: 15 additions & 15 deletions pkg/cmd/server/bootstrappolicy/policy_test.go
@@ -142,9 +142,9 @@ func testObjects(t *testing.T, list *api.List, fixtureFilename string) {
// Some roles should always cover others
func TestCovers(t *testing.T) {
allRoles := bootstrappolicy.GetBootstrapClusterRoles()
var admin *rbac.ClusterRole
var editor *rbac.ClusterRole
var viewer *rbac.ClusterRole
var admin []rbac.PolicyRule
var editor []rbac.PolicyRule
var viewer []rbac.PolicyRule
var registryAdmin *rbac.ClusterRole
var registryEditor *rbac.ClusterRole
var registryViewer *rbac.ClusterRole
@@ -158,12 +158,12 @@ func TestCovers(t *testing.T) {
for i := range allRoles {
role := allRoles[i]
switch role.Name {
case bootstrappolicy.AdminRoleName:
admin = &role
case bootstrappolicy.EditRoleName:
editor = &role
case bootstrappolicy.ViewRoleName:
viewer = &role
case "system:openshift:aggregate-to-admin", "system:aggregate-to-admin":
admin = append(admin, role.Rules...)
case "system:openshift:aggregate-to-edit", "system:aggregate-to-edit":
editor = append(editor, role.Rules...)
case "system:openshift:aggregate-to-view", "system:aggregate-to-view":
viewer = append(viewer, role.Rules...)
case bootstrappolicy.RegistryAdminRoleName:
registryAdmin = &role
case bootstrappolicy.RegistryEditorRoleName:
@@ -185,16 +185,16 @@ func TestCovers(t *testing.T) {
}
}

if covers, miss := rulevalidation.Covers(admin.Rules, editor.Rules); !covers {
if covers, miss := rulevalidation.Covers(admin, editor); !covers {
t.Errorf("failed to cover: %#v", miss)
}
if covers, miss := rulevalidation.Covers(admin.Rules, editor.Rules); !covers {
if covers, miss := rulevalidation.Covers(admin, editor); !covers {
t.Errorf("failed to cover: %#v", miss)
}
if covers, miss := rulevalidation.Covers(admin.Rules, viewer.Rules); !covers {
if covers, miss := rulevalidation.Covers(admin, viewer); !covers {
t.Errorf("failed to cover: %#v", miss)
}
if covers, miss := rulevalidation.Covers(admin.Rules, registryAdmin.Rules); !covers {
if covers, miss := rulevalidation.Covers(admin, registryAdmin.Rules); !covers {
t.Errorf("failed to cover: %#v", miss)
}
if covers, miss := rulevalidation.Covers(clusterAdmin.Rules, storageAdmin.Rules); !covers {
@@ -208,10 +208,10 @@ func TestCovers(t *testing.T) {
}

// admin and editor should cover imagebuilder
if covers, miss := rulevalidation.Covers(admin.Rules, imageBuilder.Rules); !covers {
if covers, miss := rulevalidation.Covers(admin, imageBuilder.Rules); !covers {
t.Errorf("failed to cover: %#v", miss)
}
if covers, miss := rulevalidation.Covers(editor.Rules, imageBuilder.Rules); !covers {
if covers, miss := rulevalidation.Covers(editor, imageBuilder.Rules); !covers {
t.Errorf("failed to cover: %#v", miss)
}

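Because admin/edit/view are now assembled by the aggregation controller at runtime, the bootstrap role list no longer contains their complete rule sets, so TestCovers sums the rules of the upstream and OpenShift aggregate-to-* pieces before checking coverage. A hedged sketch of that gathering step, using the public rbac/v1 types rather than the internal API the test uses, with aggregateRules as a hypothetical helper:

// aggregateRules mirrors what TestCovers now does inline: collect the rules of
// every contributing piece of an aggregated role by name. Hypothetical helper,
// not part of the origin tree.
package policysketch

import rbacv1 "k8s.io/api/rbac/v1"

func aggregateRules(roles []rbacv1.ClusterRole, names ...string) []rbacv1.PolicyRule {
	wanted := map[string]bool{}
	for _, n := range names {
		wanted[n] = true
	}
	var rules []rbacv1.PolicyRule
	for _, role := range roles {
		if wanted[role.Name] {
			rules = append(rules, role.Rules...)
		}
	}
	return rules
}

// Example usage, assuming allRoles holds the bootstrap cluster roles:
//   admin := aggregateRules(allRoles, "system:openshift:aggregate-to-admin", "system:aggregate-to-admin")
//   editor := aggregateRules(allRoles, "system:openshift:aggregate-to-edit", "system:aggregate-to-edit")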
3 changes: 3 additions & 0 deletions pkg/cmd/server/bootstrappolicy/web_console_role_test.go
@@ -60,6 +60,9 @@ var rolesToHide = sets.NewString(
"system:aggregate-to-edit",
"system:aggregate-to-view",
"system:aws-cloud-provider",
"system:openshift:aggregate-to-admin",
"system:openshift:aggregate-to-edit",
"system:openshift:aggregate-to-view",
)

func TestSystemOnlyRoles(t *testing.T) {
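The three new system:openshift:aggregate-to-* roles are building blocks that the controller folds into admin/edit/view, so they are added to rolesToHide here, presumably to keep the web console from offering them for direct binding. A minimal sketch of that kind of filter, assuming only the sets helper the test already imports (the role names and the output are illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Roles the console should treat as system-only building blocks.
	rolesToHide := sets.NewString(
		"system:aggregate-to-admin",
		"system:openshift:aggregate-to-admin",
	)
	for _, name := range []string{"admin", "edit", "system:openshift:aggregate-to-admin"} {
		if rolesToHide.Has(name) {
			continue // hidden: merged into admin/edit/view by the aggregation controller
		}
		fmt.Println("offer:", name)
	}
}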
2 changes: 1 addition & 1 deletion test/integration/ingressip_test.go
@@ -32,7 +32,7 @@ func TestIngressIPAllocation(t *testing.T) {
defer testserver.CleanupMasterEtcd(t, masterConfig)
masterConfig.NetworkConfig.ExternalIPNetworkCIDRs = []string{"172.16.0.0/24"}
masterConfig.NetworkConfig.IngressIPNetworkCIDR = "172.16.1.0/24"
clusterAdminKubeConfig, err := testserver.StartConfiguredMasterWithOptions(masterConfig, testserver.TestOptions{})
clusterAdminKubeConfig, err := testserver.StartConfiguredMasterWithOptions(masterConfig)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
