bug 1680504. Create collector clusterrole to watch pods/ns #106

Merged
16 changes: 14 additions & 2 deletions Makefile
@@ -31,7 +31,7 @@ OC?=oc
 SRC = $(shell find . -type f -name '*.go' -not -path "./vendor/*")
 
 #.PHONY: all build clean install uninstall fmt simplify check run
-.PHONY: all operator-sdk imagebuilder build clean fmt simplify gendeepcopy deploy-setup deploy-image deploy deploy-example test-unit test-e2e undeploy
+.PHONY: all operator-sdk imagebuilder build clean fmt simplify gendeepcopy deploy-setup deploy-image deploy deploy-example test-unit test-e2e undeploy run
 
 all: build #check install

@@ -56,11 +56,23 @@ build: fmt
 	@cp -ru $(CURPATH)/vendor/* $(TARGET_DIR)/src
 	@GOPATH=$(BUILD_GOPATH) $(GOBUILD) $(LDFLAGS) -o $(TARGET) $(MAIN_PKG)
 
+run:
+	ELASTICSEARCH_IMAGE=quay.io/openshift/origin-logging-elasticsearch5:latest \
+	FLUENTD_IMAGE=quay.io/openshift/origin-logging-fluentd:latest \
+	KIBANA_IMAGE=quay.io/openshift/origin-logging-kibana5:latest \
+	CURATOR_IMAGE=quay.io/openshift/origin-logging-curator5:latest \
+	OAUTH_PROXY_IMAGE=quay.io/openshift/origin-oauth-proxy:latest \
+	RSYSLOG_IMAGE=quay.io/viaq/rsyslog:latest \
+	OPERATOR_NAME=cluster-logging-operator \
+	WATCH_NAMESPACE=openshift-logging \
+	KUBERNETES_CONFIG=$(KUBECONFIG) \
+	go run cmd/cluster-logging-operator/main.go
+
 clean:
 	@rm -rf $(TARGET_DIR)
 
 image: imagebuilder
-	@if [ $${USE_IMAGE_STREAM:-false} = false ] ; \
+	@if [ $${USE_IMAGE_STREAM:-false} = false ] && [ $${SKIP_BUILD:-false} = false ] ; \
 	then $(IMAGE_BUILDER) -t $(IMAGE_TAG) . $(IMAGE_BUILDER_OPTS) ; \
 	fi

18 changes: 4 additions & 14 deletions README.md
@@ -18,16 +18,7 @@ This will stand up a cluster logging stack named 'example'.
 
 Running locally outside an OKD cluster:
 ```
-$ ELASTICSEARCH_IMAGE=quay.io/openshift/origin-logging-elasticsearch5:latest \
-  FLUENTD_IMAGE=quay.io/openshift/origin-logging-fluentd:latest \
-  KIBANA_IMAGE=quay.io/openshift/origin-logging-kibana5:latest \
-  CURATOR_IMAGE=quay.io/openshift/origin-logging-curator5:latest \
-  OAUTH_PROXY_IMAGE=quay.io/openshift/origin-oauth-proxy:latest \
-  RSYSLOG_IMAGE=quay.io/viaq/rsyslog:latest \
-  OPERATOR_NAME=cluster-logging-operator \
-  WATCH_NAMESPACE=openshift-logging \
-  KUBERNETES_CONFIG=/etc/origin/master/admin.kubeconfig \
-  go run cmd/cluster-logging-operator/main.go
+$ make run
 ```
 ### `make` targets
 Various `make` targets are included to simplify building and deploying the operator
@@ -48,9 +39,8 @@ The deployment can be optionally modified using any of the following:
 |`IMAGE_BUILDER`|`imagebuilder`| The command to build the container image|
 |`EXCLUSIONS`|none|The list of manifest files that should will be ignored|
 |`OC`|`oc` in `PATH`| The openshift binary to use to deploy resources|
-|`REMOTE_REGISTRY`|false|`true` if you are running the cluster on a different machine
-than the one you are developing on|
-
+|`REMOTE_REGISTRY`|false|`true` if you are running the cluster on a different machine than the one you are developing on|
 
 **Note:** Use `REMOTE_REGISTRY=true`, for example, if you are running a cluster in a
 local libvirt or minishift environment; you may want to build the image on the host
 and push them to the cluster running in the VM. This requires a username with a password (i.e. not the default `system:admin` user).
@@ -100,5 +90,5 @@ on which the operator runs or can be pulled from a visible registry.
 **Note:** It is necessary to set the `IMAGE_CLUSTER_LOGGING_OPERATOR` environment variable to a valid pull spec
 in order to run this test against local changes to the `cluster-logging-operator`. For example:
 ```
-$ make deploy-image && IMAGE_CLUSTER_LOGGING_OPERATOR=openshift/origin-cluster-logging-operator:latest make test-e2e
+$ make deploy-image && IMAGE_CLUSTER_LOGGING_OPERATOR=image-registry.openshift-image-registry.svc/openshift/origin-cluster-logging-operator:latest make test-e2e
 ```
2 changes: 1 addition & 1 deletion hack/common
@@ -8,7 +8,7 @@ alias oc=${OC:-oc}
 repo_dir="$(dirname $0)/.."
 ELASTICSEARCH_OP_REPO=${ELASTICSEARCH_OP_REPO:-${repo_dir}/../elasticsearch-operator}
 
-ADMIN_USER=${ADMIN_USER:-admin}
+ADMIN_USER=${ADMIN_USER:-kubeadmin}
 ADMIN_PSWD=${ADMIN_USER:-admin123}
 REMOTE_REGISTRY=${REMOTE_REGISTRY:-false}
 NAMESPACE=${NAMESPACE:-"openshift-logging"}
4 changes: 2 additions & 2 deletions hack/deploy-image.sh
@@ -55,5 +55,5 @@ if [ $ii = 10 ] ; then
 fi
 
 echo "Pushing image ${tag}..."
-docker login 127.0.0.1:${LOCAL_PORT} -u ${ADMIN_USER} -p $(oc whoami -t)
-docker push ${tag}
+docker login --tls-verify=false 127.0.0.1:${LOCAL_PORT} -u ${ADMIN_USER} -p $(oc whoami -t)
+docker push --tls-verify=false ${tag}
3 changes: 2 additions & 1 deletion manifests/03-role.yaml
@@ -81,7 +81,7 @@ rules:
 
 ---
 kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: cluster-logging-operator-cluster
 rules:
@@ -100,6 +100,7 @@ rules:
 - apiGroups:
   - rbac.authorization.k8s.io
   resources:
+  - clusterroles
   - clusterrolebindings
   verbs:
   - "*"
21 changes: 3 additions & 18 deletions manifests/04-role-binding.yaml
@@ -27,31 +27,16 @@ roleRef:
   kind: Role
   name: cluster-logging-operator
   apiGroup: rbac.authorization.k8s.io
 
 ---
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: cluster-logging-operator-cluster-rolebinding
+  name: cluster-logging-operator-cluster
 subjects:
 - kind: ServiceAccount
   name: cluster-logging-operator
   namespace: openshift-logging
 roleRef:
+  apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
   name: cluster-logging-operator-cluster
-  apiGroup: rbac.authorization.k8s.io
-
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: cluster-logging-operator-cluster-reader-binding
-subjects:
-- kind: ServiceAccount
-  name: cluster-logging-operator
-  namespace: openshift-logging
-roleRef:
-  kind: ClusterRole
-  name: cluster-reader
-  apiGroup: rbac.authorization.k8s.io
20 changes: 16 additions & 4 deletions pkg/k8shandler/collection.go
@@ -241,6 +241,19 @@ func createOrUpdateCollectorServiceAccount(cluster *logging.ClusterLogging) error {
 		return fmt.Errorf("Failure creating Log collector privileged role binding: %v", err)
 	}
 
+	// create clusterrole for logcollector to retrieve metadata
+	clusterrules := utils.NewPolicyRules(
+		utils.NewPolicyRule(
+			[]string{""},
+			[]string{"pods", "namespaces"},
+			nil,
+			[]string{"get", "list", "watch"},
+		),
+	)
+	clusterRole, err := utils.CreateClusterRole("metadata-reader", clusterrules, cluster)

> **Reviewer (Contributor):** Can you update this to follow the other role creation patterns for now?

> **Author (Contributor):** I'm not sure what you are asking here. I added code to a reusable method that does the same thing as if it was inlined.

+	if err != nil {
+		return err
+	}
 	subject = utils.NewSubject(
 		"ServiceAccount",
 		"logcollector",
@@ -249,9 +262,8 @@ func createOrUpdateCollectorServiceAccount(cluster *logging.ClusterLogging) error {
 	subject.APIGroup = ""
 
 	collectorReaderClusterRoleBinding := utils.NewClusterRoleBinding(
-		"openshift-logging-collector-cluster-reader",
-		cluster.Namespace,
-		"cluster-reader",
+		"cluster-logging-metadata-reader",
+		clusterRole.Name,
 		utils.NewSubjects(
 			subject,
 		),
@@ -261,7 +273,7 @@ func createOrUpdateCollectorServiceAccount(cluster *logging.ClusterLogging) error {
 
 	err = sdk.Create(collectorReaderClusterRoleBinding)
 	if err != nil && !errors.IsAlreadyExists(err) {
-		return fmt.Errorf("Failure creating Log collector cluster-reader role binding: %v", err)
+		return fmt.Errorf("Failure creating Log collector %q cluster role binding: %v", collectorReaderClusterRoleBinding.Name, err)
 	}
 
 	return nil
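
For reference, here is a sketch of the two objects this code path ends up creating, written directly against the upstream `rbac/v1` types. The names and rules mirror the diff above; `openshift-logging` stands in for `cluster.Namespace` at runtime:

```go
package example

import (
	rbac "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Roughly what utils.CreateClusterRole("metadata-reader", clusterrules, cluster)
// builds: cluster-wide, read-only access to pod and namespace metadata.
var metadataReader = &rbac.ClusterRole{
	TypeMeta:   metav1.TypeMeta{Kind: "ClusterRole", APIVersion: rbac.SchemeGroupVersion.String()},
	ObjectMeta: metav1.ObjectMeta{Name: "metadata-reader"},
	Rules: []rbac.PolicyRule{{
		APIGroups: []string{""}, // "" is the core API group, which owns pods and namespaces
		Resources: []string{"pods", "namespaces"},
		Verbs:     []string{"get", "list", "watch"},
	}},
}

// Roughly what the NewClusterRoleBinding call builds: the role above bound to
// the logcollector service account, replacing the far broader cluster-reader.
var metadataReaderBinding = &rbac.ClusterRoleBinding{
	TypeMeta:   metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: rbac.SchemeGroupVersion.String()},
	ObjectMeta: metav1.ObjectMeta{Name: "cluster-logging-metadata-reader"},
	Subjects: []rbac.Subject{{
		Kind:      "ServiceAccount",
		Name:      "logcollector",
		Namespace: "openshift-logging", // cluster.Namespace at runtime
	}},
	RoleRef: rbac.RoleRef{
		APIGroup: "rbac.authorization.k8s.io",
		Kind:     "ClusterRole",
		Name:     "metadata-reader",
	},
}
```

This is the net effect of the PR: the collector drops `cluster-reader` in favor of a role scoped to exactly the pod and namespace metadata it needs.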
2 changes: 1 addition & 1 deletion pkg/utils/types.go
@@ -331,7 +331,7 @@ func NewRoleBinding(bindingName, namespace, roleName string, subjects []rbac.Subject) *rbac.RoleBinding {
 	}
 }
 
-func NewClusterRoleBinding(bindingName, namespace, roleName string, subjects []rbac.Subject) *rbac.ClusterRoleBinding {
+func NewClusterRoleBinding(bindingName, roleName string, subjects []rbac.Subject) *rbac.ClusterRoleBinding {
 	return &rbac.ClusterRoleBinding{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "ClusterRoleBinding",
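
Dropping the `namespace` parameter matches the API: a `ClusterRoleBinding` is cluster-scoped and has no namespace of its own; only its service-account subjects carry one. A call site after the change, mirroring `collection.go` above:

```go
// bindingName, roleName, subjects — no namespace argument anymore.
collectorReaderClusterRoleBinding := utils.NewClusterRoleBinding(
	"cluster-logging-metadata-reader",
	clusterRole.Name,
	utils.NewSubjects(subject), // each subject still carries its own namespace
)
```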
23 changes: 23 additions & 0 deletions pkg/utils/utils.go
@@ -16,6 +16,7 @@ import (
 	apps "k8s.io/api/apps/v1"
 	batch "k8s.io/api/batch/v1beta1"
 	core "k8s.io/api/core/v1"
+	rbac "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/util/retry"
@@ -73,6 +74,28 @@ func GetComponentImage(component string) string {
 	return imageTag
 }
 
+// CreateClusterRole creates a cluster role or returns an error
+func CreateClusterRole(name string, rules []rbac.PolicyRule, cluster *logging.ClusterLogging) (*rbac.ClusterRole, error) {
+	clusterRole := &rbac.ClusterRole{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ClusterRole",
+			APIVersion: rbac.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Rules: rules,
+	}
+
+	AddOwnerRefToObject(clusterRole, AsOwner(cluster))
+
+	err := sdk.Create(clusterRole)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		return nil, fmt.Errorf("Failure creating '%s' clusterrole: %v", name, err)
+	}
+	return clusterRole, nil
+}
+
 func GetFileContents(filePath string) []byte {
 
 	if filePath == "" {
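
Note that the helper swallows `IsAlreadyExists`, so it is create-if-absent rather than create-or-update: if the role already exists with stale rules, the existing object is left unreconciled. A minimal usage sketch, assuming the third `NewPolicyRule` argument is a `resourceNames` filter (the values mirror the collector role in `collection.go`):

```go
rules := utils.NewPolicyRules(
	utils.NewPolicyRule(
		[]string{""},                     // core API group
		[]string{"pods", "namespaces"},   // resources the collector reads
		nil,                              // no resourceNames restriction
		[]string{"get", "list", "watch"}, // read-only verbs
	),
)
// Safe to call on every reconcile pass; AlreadyExists is treated as success.
clusterRole, err := utils.CreateClusterRole("metadata-reader", rules, cluster)
if err != nil {
	return err
}
_ = clusterRole // clusterRole.Name feeds the ClusterRoleBinding
```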
71 changes: 1 addition & 70 deletions test/e2e/clusterlogging_test.go
@@ -8,12 +8,10 @@ import (
 
 	"github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1"
 	"github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
-	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	v1 "k8s.io/api/core/v1"
 
 	logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1alpha1"
 	framework "github.com/operator-framework/operator-sdk/pkg/test"
-	rbac "k8s.io/api/rbac/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -56,69 +54,6 @@ func TestClusterLogging(t *testing.T) {
 	})
 }
 
-func createRequiredClusterRoleAndBinding(f *framework.Framework, ctx *framework.TestCtx) error {
-	namespace, err := ctx.GetNamespace()
-	if err != nil {
-		return fmt.Errorf("Could not get namespace: %v", err)
-	}
-
-	clusterRole := &rbac.ClusterRole{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "ClusterRole",
-			APIVersion: rbac.SchemeGroupVersion.String(),
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "cluster-logging-operator-cluster",
-		},
-		Rules: []rbac.PolicyRule{
-			rbac.PolicyRule{
-				APIGroups: []string{"scheduling.k8s.io"},
-				Resources: []string{"priorityclasses"},
-				Verbs:     []string{"*"},
-			},
-			rbac.PolicyRule{
-				APIGroups: []string{"oauth.openshift.io"},
-				Resources: []string{"oauthclients"},
-				Verbs:     []string{"*"},
-			},
-		},
-	}
-
-	err = f.Client.Create(goctx.TODO(), clusterRole, &framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
-	if err != nil && !errors.IsAlreadyExists(err) {
-		return err
-	}
-
-	clusterRoleBinding := &rbac.ClusterRoleBinding{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "ClusterRoleBinding",
-			APIVersion: rbac.SchemeGroupVersion.String(),
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "cluster-logging-operator-cluster-rolebinding",
-		},
-		Subjects: []rbac.Subject{
-			rbac.Subject{
-				Kind:      "ServiceAccount",
-				Name:      "cluster-logging-operator",
-				Namespace: namespace,
-			},
-		},
-		RoleRef: rbac.RoleRef{
-			APIGroup: "rbac.authorization.k8s.io",
-			Kind:     "ClusterRole",
-			Name:     "cluster-logging-operator-cluster",
-		},
-	}
-
-	err = f.Client.Create(goctx.TODO(), clusterRoleBinding, &framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
-	if err != nil && !errors.IsAlreadyExists(err) {
-		return err
-	}
-
-	return nil
-}

 func clusterLoggingFullClusterTest(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, collector string) error {
 	namespace, err := ctx.GetNamespace()
 	if err != nil {
@@ -224,10 +159,6 @@ func waitForOperatorToBeReady(t *testing.T, ctx *framework.TestCtx) error {
 		return err
 	}
 
-	if err = createRequiredClusterRoleAndBinding(f, ctx); err != nil {
-		return err
-	}
-
 	return nil
 }
