Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adding RBAC support in kubernetes config (V2) #694

Merged
merged 17 commits into from May 31, 2019
Merged
Show file tree
Hide file tree
Changes from 15 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
6 changes: 2 additions & 4 deletions bin/run_travis.sh
Expand Up @@ -71,7 +71,5 @@ retry_test() {
retry_test python kubernetes_integration_test.py; sleep 30
retry_test python kubernetes_multi_frontend.py; sleep 30
retry_test python kubernetes_namespace.py; sleep 30
retry_test python multi_tenancy_test.py --kubernetes

# TODO: disabled for now, will re-enable after RBAC PR
# time python clipper_metric_kube.py
retry_test python multi_tenancy_test.py --kubernetes; sleep 30
retry_test python clipper_metric_kube.py
Expand Up @@ -63,6 +63,10 @@
'deployment': 'prom_deployment.yaml',
'config': 'prom_configmap.yaml'
},
'rbac': {
'clusterrole': 'rbac_cluster_role.yaml',
'clusterrolebinding': 'rbac_cluster_role_binding.yaml',
},
'model': {
'deployment': 'model-container-template.yaml'
}
Expand Down Expand Up @@ -163,6 +167,8 @@ def __init__(self,
configuration.assert_hostname = False
self._k8s_v1 = client.CoreV1Api()
self._k8s_beta = client.ExtensionsV1beta1Api()
self._k8s_rbac = client.RbacAuthorizationV1beta1Api()


# Create the template engine
# Config: Any variable missing -> Error
Expand Down Expand Up @@ -249,6 +255,7 @@ def start_clipper(self,
qf_http_timeout_request,
qf_http_timeout_content,
num_frontend_replicas=1):
self._config_rbac()
self._start_redis()
self._start_mgmt(mgmt_frontend_image)
self.num_frontend_replicas = num_frontend_replicas
Expand Down Expand Up @@ -370,6 +377,7 @@ def _start_prometheus(self):
CONFIG_FILES['metric']['deployment'],
version=PROM_VERSION,
cluster_name=self.cluster_name,
service_account_name=self.cluster_name+"-prometheus"
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I can't find service_account_name in prom_deployment.yaml.

Copy link
Collaborator

@rkooo567 rkooo567 May 30, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So, I decided to use `default` as the prom_deployment service account because

  1. it works
  2. There's no reason to set up a specific service account for users; we can leave that choice to them.

I will delete this line.

)
self._k8s_beta.create_namespaced_deployment(
body=deployment_data, namespace=self.k8s_namespace)
Expand All @@ -383,6 +391,21 @@ def _start_prometheus(self):
self._k8s_v1.create_namespaced_service(
body=service_data, namespace=self.k8s_namespace)

def _config_rbac(self):
    """Install the cluster-scoped RBAC objects required by Prometheus.

    Renders and creates the ClusterRole and ClusterRoleBinding templates
    listed under ``CONFIG_FILES['rbac']``, templated with this cluster's
    name and namespace. Each creation is wrapped in ``_pass_conflicts()``
    so an already-existing object (HTTP 409) is tolerated rather than
    raised.
    """
    # (config key, k8s RBAC API call) pairs, created in this order:
    # the role must exist conceptually before anything binds to it.
    rbac_resources = (
        ('clusterrole', self._k8s_rbac.create_cluster_role),
        ('clusterrolebinding', self._k8s_rbac.create_cluster_role_binding),
    )
    for config_key, create_resource in rbac_resources:
        with _pass_conflicts():
            rendered_manifest = self._generate_config(
                CONFIG_FILES['rbac'][config_key],
                cluster_name=self.cluster_name,
                namespace=self.k8s_namespace)
            create_resource(body=rendered_manifest)

def _generate_config(self, file_path, **kwargs):
template = self.template_engine.get_template(file_path)
rendered = template.render(**kwargs)
Expand Down Expand Up @@ -659,6 +682,12 @@ def stop_all(self, graceful=True):

self._k8s_v1.delete_collection_namespaced_config_map(
namespace=self.k8s_namespace, label_selector=cluster_selector)

self._k8s_rbac.delete_collection_cluster_role(
label_selector=cluster_selector)

self._k8s_rbac.delete_collection_cluster_role_binding(
label_selector=cluster_selector)
except ApiException as e:
logging.warning(
"Exception deleting kubernetes resources: {}".format(e))
Expand Down
23 changes: 23 additions & 0 deletions clipper_admin/clipper_admin/kubernetes/rbac_cluster_role.yaml
@@ -0,0 +1,23 @@
# ClusterRole granting the read-only permissions Prometheus needs to
# discover and scrape targets across the cluster. Rendered with Jinja2:
# {{ cluster_name }} scopes the object's name/labels to one Clipper cluster.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    # NOTE(review): presumably this label is what stop_all()'s
    # delete_collection_cluster_role(label_selector=...) matches on when
    # tearing the cluster down — confirm the selector key used there.
    ai.clipper.container.label: {{ cluster_name }}
    ai.clipper.name: prom-cluster-role
  name: {{cluster_name}}-prometheus
rules:
# Core ("") API group: read/list/watch the objects Prometheus uses for
# service discovery and target metadata.
- apiGroups: [""]
  resources:
  - nodes
  - nodes/proxy
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
# Ingresses are served from the `extensions` group on the API versions
# this v1beta1 manifest targets.
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
# Non-resource URL access so Prometheus can scrape the /metrics endpoint.
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
@@ -0,0 +1,15 @@
# ClusterRoleBinding attaching the Prometheus ClusterRole (created from
# rbac_cluster_role.yaml, same {{cluster_name}}-prometheus name) to the
# service account Prometheus runs under. Rendered with Jinja2.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  labels:
    # NOTE(review): presumably matched by stop_all()'s
    # delete_collection_cluster_role_binding(label_selector=...) — confirm.
    ai.clipper.container.label: {{ cluster_name }}
    ai.clipper.name: prom-cluster-role-binding
  name: {{cluster_name}}-prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{cluster_name}}-prometheus
subjects:
# The PR discussion settled on the namespace's `default` service account
# rather than provisioning a dedicated one for Prometheus.
- kind: ServiceAccount
  name: default
  namespace: {{ namespace }}