4 changes: 4 additions & 0 deletions Makefile
@@ -47,6 +47,10 @@ run: install install-rbac
eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \
go run ./cmd/manager/main.go

debug: install install-rbac
eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \
dlv debug ./cmd/manager/main.go

# Install CRDs into a cluster
install: manifests helm install-crd

1 change: 1 addition & 0 deletions README.md
@@ -45,6 +45,7 @@ The MongoDB Community Kubernetes Operator supports the following features:
- Secure client-to-server and server-to-server connections with TLS
- Create users with [SCRAM](https://docs.mongodb.com/manual/core/security-scram/) authentication
- Create custom roles
- Enable a [metrics target that can be used with Prometheus](docs/prometheus/README.md)

### Planned Features
- Server internal authentication via keyfile
20 changes: 14 additions & 6 deletions api/v1/mongodbcommunity_types.go
@@ -34,13 +34,13 @@ const (
type Phase string

 const (
-	Running Phase = "Running"
-	Failed  Phase = "Failed"
-	Pending Phase = "Pending"
-)
+	Running            Phase = "Running"
+	Failed             Phase = "Failed"
+	Pending            Phase = "Pending"
+	defaultPasswordKey       = "password"
 
-const (
-	defaultPasswordKey = "password"
+	// Keep in sync with controllers/prometheus.go
+	defaultPrometheusPort = 9216
 )

// SCRAM-SHA-256 and SCRAM-SHA-1 are the supported auth modes.
@@ -161,6 +161,14 @@ func (p Prometheus) GetPasswordKey() string {
return "password"
}

func (p Prometheus) GetPort() int {
if p.Port != 0 {
return p.Port
}

return defaultPrometheusPort
}

// ConvertToAutomationConfigCustomRole converts between a custom role defined by the crd and a custom role
// that can be used in the automation config.
func (c CustomRole) ConvertToAutomationConfigCustomRole() automationconfig.CustomRole {
@@ -24,6 +24,10 @@ spec:
# Optional. Defaults to 9216
# port: 9216

# Prometheus endpoint can be configured to use HTTPS
# tlsSecretKeyRef:
# name: "<kubernetes.io/tls secret name>"

security:
authentication:
modes: ["SCRAM"]
21 changes: 19 additions & 2 deletions controllers/prometheus.go
@@ -3,6 +3,8 @@ package controllers
import (
"fmt"

corev1 "k8s.io/api/core/v1"

mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1"
"github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig"
"github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret"
@@ -12,7 +14,9 @@ import (
)

 const (
-	listenAddress = "0.0.0.0"
+	// Keep in sync with api/v1/mongodbcommunity_types.go
+	DefaultPrometheusPort = 9216
+	ListenAddress         = "0.0.0.0"
 )

// PrometheusModification adds Prometheus configuration to AutomationConfig.
@@ -50,7 +54,7 @@ func getPrometheusModification(getUpdateCreator secret.GetUpdateCreator, mdb mdb
promConfig.Password = password

if mdb.Spec.Prometheus.Port > 0 {
-		promConfig.ListenAddress = fmt.Sprintf("%s:%d", listenAddress, mdb.Spec.Prometheus.Port)
+		promConfig.ListenAddress = fmt.Sprintf("%s:%d", ListenAddress, mdb.Spec.Prometheus.Port)
}

if mdb.Spec.Prometheus.MetricsPath != "" {
@@ -60,3 +64,16 @@
config.Prometheus = &promConfig
}, nil
}

// prometheusPort returns a `corev1.ServicePort` to be configured in the Service
// for the Prometheus endpoint. This function will only return a new Port when
// Prometheus has been configured, and nil otherwise.
func prometheusPort(mdb mdbv1.MongoDBCommunity) *corev1.ServicePort {
if mdb.Spec.Prometheus != nil {
return &corev1.ServicePort{
Port: int32(mdb.Spec.Prometheus.GetPort()),
Name: "prometheus",
}
}
return nil
}
14 changes: 12 additions & 2 deletions controllers/replica_set_controller.go
@@ -543,15 +543,25 @@ func buildService(mdb mdbv1.MongoDBCommunity, isArbiter bool) corev1.Service {
 		SetName(name).
 		SetNamespace(mdb.Namespace).
 		SetSelector(label).
+		SetLabels(label).
 		SetServiceType(corev1.ServiceTypeClusterIP).
 		SetClusterIP("None").
-		SetPort(int32(mdb.GetMongodConfiguration().GetDBPort())).
-		SetPortName("mongodb").
 		SetPublishNotReadyAddresses(true).
 		SetOwnerReferences(mdb.GetOwnerReferences()).
+		AddPort(mongoDBPort(mdb)).
+		AddPort(prometheusPort(mdb)).
 		Build()
}

// mongoDBPort returns a `corev1.ServicePort` to be configured in the Service
// for this MongoDB resource.
func mongoDBPort(mdb mdbv1.MongoDBCommunity) *corev1.ServicePort {
return &corev1.ServicePort{
Port: int32(mdb.GetMongodConfiguration().GetDBPort()),
Name: "mongodb",
}
}

// validateSpec checks if the MongoDB resource Spec is valid.
// If there has not yet been a successful configuration, the function runs the intial Spec validations. Otherwise
// it checks that the attempted Spec is valid in relation to the Spec that resulted from that last successful configuration.
75 changes: 75 additions & 0 deletions controllers/replicaset_controller_test.go
@@ -296,6 +296,81 @@ func TestService_usesCustomMongodPortWhenSpecified(t *testing.T) {
assertReconciliationSuccessful(t, res, err)
}

func TestService_configuresPrometheusCustomPorts(t *testing.T) {
mdb := newTestReplicaSet()
mdb.Spec.Prometheus = &mdbv1.Prometheus{
Username: "username",
PasswordSecretRef: mdbv1.SecretKeyReference{
Name: "secret",
},
Port: 4321,
}

mongodConfig := objx.New(map[string]interface{}{})
mongodConfig.Set("net.port", 1000.)
mdb.Spec.AdditionalMongodConfig.Object = mongodConfig

mgr := client.NewManager(&mdb)
err := secret.CreateOrUpdate(mgr.Client,
secret.Builder().
SetName("secret").
SetNamespace(mdb.Namespace).
SetField("password", "my-password").
Build(),
)

assert.NoError(t, err)
r := NewReconciler(mgr)
res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}})
assertReconciliationSuccessful(t, res, err)

svc := corev1.Service{}
err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc)
assert.NoError(t, err)
assert.Equal(t, svc.Spec.Type, corev1.ServiceTypeClusterIP)
assert.Equal(t, svc.Spec.Selector["app"], mdb.ServiceName())
assert.Len(t, svc.Spec.Ports, 2)
assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 1000, Name: "mongodb"})
assert.Equal(t, svc.Spec.Ports[1], corev1.ServicePort{Port: 4321, Name: "prometheus"})

assert.Equal(t, svc.Labels["app"], mdb.ServiceName())

res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}})
assertReconciliationSuccessful(t, res, err)
}

func TestService_configuresPrometheus(t *testing.T) {
mdb := newTestReplicaSet()
mdb.Spec.Prometheus = &mdbv1.Prometheus{
Username: "username",
PasswordSecretRef: mdbv1.SecretKeyReference{
Name: "secret",
},
}

mgr := client.NewManager(&mdb)
err := secret.CreateOrUpdate(mgr.Client,
secret.Builder().
SetName("secret").
SetNamespace(mdb.Namespace).
SetField("password", "my-password").
Build(),
)
assert.NoError(t, err)

r := NewReconciler(mgr)
res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}})
assertReconciliationSuccessful(t, res, err)

svc := corev1.Service{}
err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc)
assert.NoError(t, err)

assert.Len(t, svc.Spec.Ports, 2)
assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 27017, Name: "mongodb"})
assert.Equal(t, svc.Spec.Ports[1], corev1.ServicePort{Port: 9216, Name: "prometheus"})
}

func TestCustomNetPort_Configuration(t *testing.T) {
svc, _ := performReconciliationAndGetService(t, "specify_net_port.yaml")
assert.Equal(t, svc.Spec.Type, corev1.ServiceTypeClusterIP)
148 changes: 148 additions & 0 deletions docs/prometheus/README.md
@@ -0,0 +1,148 @@
# Using Prometheus with your MongoDB Resource

We have added a sample yaml file that you can use to deploy a MongoDB resource
in your Kubernetes cluster, together with a
[`ServiceMonitor`](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md#related-resources)
that tells Prometheus how to consume its metrics.

The sample defines a simple MongoDB resource with one user, with the
`spec.prometheus` attribute configured for basic HTTP auth and no TLS, which
lets you test Prometheus metrics coming from MongoDB.
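
As a sketch of what that attribute looks like (the username and secret name
below are placeholders; the authoritative values are in
`mongodb-prometheus-sample.yaml`):

``` yaml
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
  name: mongodb
spec:
  # ... members, version, users, etc.
  prometheus:
    # Username for basic HTTP auth on the metrics endpoint
    username: prometheus-username            # placeholder
    # Secret holding the corresponding password under the key "password"
    passwordSecretRef:
      name: metrics-endpoint-password        # placeholder
    # Optional. Defaults to 9216
    # port: 9216
    # Optional. Enables HTTPS on the endpoint (see the Bonus section below)
    # tlsSecretKeyRef:
    #   name: "<kubernetes.io/tls secret name>"
```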

## Quick Start

We have tested this setup with version 0.54 of the [Prometheus
Operator](https://github.com/prometheus-operator/prometheus-operator).

### Installing Prometheus Operator

The Prometheus Operator can be installed with Helm; the full installation
instructions are
[here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#kube-prometheus-stack).

In short:

``` shell
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
helm install prometheus prometheus-community/kube-prometheus-stack --namespace prometheus-system --create-namespace
```

### Installing MongoDB

*Change after release to a proper Helm install.*

* Create a Namespace to hold our MongoDB Operator and Resources

``` shell
kubectl create namespace mongodb
```

* Follow the [Installation Instructions](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/docs/install-upgrade.md#operator-in-same-namespace-as-resources)

## Creating a MongoDB Resource

We have created a sample yaml definition that you can use to create a MongoDB
resource and a `ServiceMonitor` that tells Prometheus to start scraping its
metrics.

You can apply it directly with:

``` shell
kubectl apply -f mongodb-prometheus-sample.yaml
```

This will create two `Secrets`, containing authentication for a new MongoDB
user and basic HTTP auth credentials for the Prometheus endpoint, both in the
`mongodb` namespace.

It will also create a `ServiceMonitor` that configures Prometheus to consume
this resource's metrics. This will be created in the `prometheus-system`
namespace.
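
The `ServiceMonitor` in the sample looks roughly like the following sketch.
Here we assume the default kube-prometheus-stack release label
(`release: prometheus`) and use placeholder names for the Service selector and
the credentials Secret; the authoritative definition is in
`mongodb-prometheus-sample.yaml`:

``` yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: mongodb-sm
  labels:
    # Label that the kube-prometheus-stack Prometheus selects by default
    release: prometheus
spec:
  selector:
    matchLabels:
      # The operator labels the Service with app=<service name>
      app: mongodb-svc                       # placeholder
  endpoints:
    - port: prometheus   # named Service port added by the operator
      scheme: http
      basicAuth:
        username:
          name: metrics-endpoint-creds       # placeholder Secret
          key: username
        password:
          name: metrics-endpoint-creds       # placeholder Secret
          key: password
```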


## Bonus: Enable TLS on the Prometheus Endpoint

### Installing Cert-Manager

We will install [Cert-Manager](https://cert-manager.io/) using Helm.

``` shell
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.7.1 \
--set installCRDs=true
```

With Cert-Manager installed, we'll create a Cert-Manager `Issuer` and then a
`Certificate`. We provide two files that can be used to create a new `Issuer`.

First we need to create a `Secret` holding a TLS certificate, with `tls.crt`
and `tls.key` entries. The certificate and key files that can be used for this
are in the `testdata/tls` directory.

``` shell
$ kubectl create secret tls issuer-secret --cert=../../testdata/tls/ca.crt --key=../../testdata/tls/ca.key \
--namespace mongodb
secret/issuer-secret created
```

Now we are ready to create a new `Issuer` and `Certificate` by running the
following command:

``` shell
$ kubectl apply -f issuer-and-cert.yaml --namespace mongodb
issuer.cert-manager.io/ca-issuer created
certificate.cert-manager.io/prometheus-target-cert created
```

### Enabling TLS on the MongoDB CRD

<center>_Make sure this configuration is not used in production environments! A
security expert should advise you on how to configure TLS._</center>

We need to add a new entry to the `spec.prometheus` section of the MongoDB
`CustomResource`; we can do this by executing the following
[patch](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/)
operation:

``` shell
$ kubectl patch mdbc mongodb --type='json' \
-p='[{"op": "add", "path": "/spec/prometheus/tlsSecretKeyRef", "value":{"name": "prometheus-target-cert"}}]' \
--namespace mongodb

mongodbcommunity.mongodbcommunity.mongodb.com/mongodb patched
```

After a few minutes, the MongoDB resource should be back in the Running phase.
Now we need to configure our Prometheus `ServiceMonitor` to point at the HTTPS
endpoint.

### Update ServiceMonitor

To update our `ServiceMonitor` we will again patch the resource:

``` shell
$ kubectl patch servicemonitors mongodb-sm --type='json' \
-p='
[
{"op": "replace", "path": "/spec/endpoints/0/scheme", "value": "https"},
{"op": "add", "path": "/spec/endpoints/0/tlsConfig", "value": {"insecureSkipVerify": true}}
]
' \
--namespace mongodb

servicemonitor.monitoring.coreos.com/mongodb-sm patched
```

With these changes, the `ServiceMonitor` now points at the HTTPS endpoint
(defined in `/spec/endpoints/0/scheme`). We also set
`/spec/endpoints/0/tlsConfig/insecureSkipVerify` to `true`, so that Prometheus
does not verify the TLS certificates on MongoDB's end.
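
Sketched from the patch above, the first endpoint entry of the `ServiceMonitor`
now reads:

``` yaml
endpoints:
  - port: prometheus
    scheme: https
    tlsConfig:
      insecureSkipVerify: true
    # any basicAuth configuration from the original definition is unchanged
```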

Prometheus should now be able to scrape the MongoDB target over HTTPS.