Merged
34 changes: 17 additions & 17 deletions README.md
@@ -17,7 +17,7 @@ The full documentation for the Operator can be found [here](https://docs.atlas.m
kubectl apply -f https://raw.githubusercontent.com/mongodb/mongodb-atlas-kubernetes/main/deploy/all-in-one.yaml
```

### Step 2. Create Atlas Cluster
### Step 2. Create Atlas Deployment

**1.** Create an Atlas API Key Secret
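
The commands for this step are collapsed in the diff below; as context, a minimal sketch of creating the Secret (the org ID and API key values are placeholders, and the `mongodb-atlas-system` namespace assumes the default all-in-one install):

```
kubectl create secret generic mongodb-atlas-operator-api-key \
    --from-literal="orgId=<your-org-id>" \
    --from-literal="publicApiKey=<your-public-api-key>" \
    --from-literal="privateApiKey=<your-private-api-key>" \
    -n mongodb-atlas-system
```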

@@ -39,7 +39,7 @@ kubectl label secret mongodb-atlas-operator-api-key atlas.mongodb.com/type=crede
**2.** Create an `AtlasProject` Custom Resource

The `AtlasProject` Custom Resource represents Atlas Projects in our Kubernetes cluster. You need to specify
`projectIpAccessList` with the IP addresses or CIDR blocks of any hosts that will connect to the Atlas Cluster.
`projectIpAccessList` with the IP addresses or CIDR blocks of any hosts that will connect to the Atlas Deployment.

```
cat <<EOF | kubectl apply -f -
@@ -59,7 +59,7 @@ EOF
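
The manifest body is collapsed in the diff above; a minimal `AtlasProject` sketch consistent with the surrounding steps (`my-project` is the name the `AtlasDeployment` below references; the spec name and IP address are illustrative):

```
cat <<EOF | kubectl apply -f -
apiVersion: atlas.mongodb.com/v1
kind: AtlasProject
metadata:
  name: my-project
spec:
  name: Test Atlas Operator Project
  projectIpAccessList:
    - ipAddress: "192.0.2.15"
      comment: "IP address for Application Server A"
EOF
```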

**3.** Create an `AtlasDeployment` Custom Resource.

The example below is a minimal configuration to create an M10 Atlas cluster in the AWS US East region. For a full list
The example below is a minimal configuration to create an M10 Atlas deployment in the AWS US East region. For a full list
of properties, check the
`atlasdeployments.atlas.mongodb.com` [CRD specification](config/crd/bases/atlas.mongodb.com_atlasdeployments.yaml):

@@ -68,12 +68,12 @@ cat <<EOF | kubectl apply -f -
apiVersion: atlas.mongodb.com/v1
kind: AtlasDeployment
metadata:
name: my-atlas-cluster
name: my-atlas-deployment
spec:
projectRef:
name: my-project
clusterSpec:
name: "Test-cluster"
deploymentSpec:
name: "Test-deployment"
providerSettings:
instanceSizeName: M10
providerName: AWS
@@ -93,7 +93,7 @@ kubectl label secret the-user-password atlas.mongodb.com/type=credentials
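
Step 4 is collapsed in this diff; judging from the label command above, it creates the password Secret roughly as follows (the `password` key and its value are assumptions):

```
kubectl create secret generic the-user-password --from-literal="password=<secure-password>"
kubectl label secret the-user-password atlas.mongodb.com/type=credentials
```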

**5.** Create an `AtlasDatabaseUser` Custom Resource

In order to connect to an Atlas Cluster the database user needs to be created. `AtlasDatabaseUser` resource should
In order to connect to an Atlas Deployment, a database user needs to be created. The `AtlasDatabaseUser` resource should
reference the password Kubernetes Secret created in the previous step.

```
@@ -116,29 +116,29 @@ EOF
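
The manifest is collapsed above; a sketch consistent with the names used elsewhere in this walkthrough (`my-database-user`, `theuser`, and `the-user-password` all appear in surrounding steps; the role is an assumption):

```
cat <<EOF | kubectl apply -f -
apiVersion: atlas.mongodb.com/v1
kind: AtlasDatabaseUser
metadata:
  name: my-database-user
spec:
  roles:
    - roleName: "readWriteAnyDatabase"
      databaseName: "admin"
  projectRef:
    name: my-project
  username: theuser
  passwordSecretRef:
    name: the-user-password
EOF
```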

**6.** Wait for the `AtlasDatabaseUser` Custom Resource to be ready

Wait until the AtlasDatabaseUser resource gets to "ready" status (it will wait until the cluster is created that may
Wait until the AtlasDatabaseUser resource gets to "ready" status (it will wait until the deployment is created, which may
take around 10 minutes):

```
kubectl get atlasdatabaseusers my-database-user -o=jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
True
```
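
Equivalently, `kubectl wait` can block on the same condition instead of polling the jsonpath output (the 20-minute timeout is an arbitrary allowance for provisioning):

```
kubectl wait --for=condition=Ready atlasdatabaseuser/my-database-user --timeout=20m
```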

### Step 3. Connect your application to the Atlas Cluster
### Step 3. Connect your application to the Atlas Deployment

The Atlas Operator will create a Kubernetes Secret with the information necessary to connect to the Atlas Cluster
The Atlas Operator will create a Kubernetes Secret with the information necessary to connect to the Atlas Deployment
created in the previous step. An application in the same Kubernetes cluster can mount and use the Secret:

```
...
containers:
  - name: test-app
    env:
      - name: "CONNECTION_STRING"
        valueFrom:
          secretKeyRef:
            name: test-atlas-operator-project-test-cluster-theuser
            key: connectionStringStandardSrv
```
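
On the application side, the mounted value is a standard connection string; a minimal sketch of a consumer using the official Go driver (not part of this PR; the calls shown are the v1 `mongo-driver` API):

```
package main

import (
	"context"
	"log"
	"os"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// CONNECTION_STRING is injected from the Operator-created Secret
	// (see the container spec above).
	uri := os.Getenv("CONNECTION_STRING")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = client.Disconnect(ctx) }()

	// Ping verifies the deployment is reachable with the injected credentials.
	if err := client.Ping(ctx, nil); err != nil {
		log.Fatal(err)
	}
	log.Println("connected to Atlas")
}
```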

24 changes: 12 additions & 12 deletions cmd/post-install/main.go
@@ -32,7 +32,7 @@ func setupLogger() *zap.SugaredLogger {
return log.Sugar()
}

// createK8sClient creates an in cluster client which can be used to fetch the current state of the AtlasDeployment
// createK8sClient creates a client which can be used to fetch the current state of the AtlasDeployment
// resource.
func createK8sClient() (client.Client, error) {
restCfg, err := rest.InClusterConfig()
@@ -50,9 +50,9 @@ func createK8sClient() (client.Client, error) {
return k8sClient, nil
}

// isClusterReady returns a boolean indicating if the cluster has reached the ready state and is
// isDeploymentReady returns a boolean indicating if the deployment has reached the ready state and is
// ready to be used.
func isClusterReady(logger *zap.SugaredLogger) (bool, error) {
func isDeploymentReady(logger *zap.SugaredLogger) (bool, error) {
k8sClient, err := createK8sClient()
if err != nil {
return false, err
@@ -61,7 +61,7 @@ func isClusterReady(logger *zap.SugaredLogger) (bool, error) {
ticker := time.NewTicker(pollingInterval)
defer ticker.Stop()

clusterName := os.Getenv("CLUSTER_NAME")
deploymentName := os.Getenv("DEPLOYMENT_NAME")
namespace := os.Getenv("NAMESPACE")

totalTime := time.Duration(0)
@@ -71,18 +71,18 @@ }
}
totalTime += pollingInterval

atlasCluster := mdbv1.AtlasDeployment{}
if err := k8sClient.Get(context.TODO(), kube.ObjectKey(namespace, clusterName), &atlasCluster); err != nil {
atlasDeployment := mdbv1.AtlasDeployment{}
if err := k8sClient.Get(context.TODO(), kube.ObjectKey(namespace, deploymentName), &atlasDeployment); err != nil {
return false, err
}

// the atlas project has reached the ClusterReady state.
for _, cond := range atlasCluster.Status.Conditions {
if cond.Type == status.ClusterReadyType {
// the atlas deployment has reached the DeploymentReady state.
for _, cond := range atlasDeployment.Status.Conditions {
if cond.Type == status.DeploymentReadyType {
if cond.Status == corev1.ConditionTrue {
return true, nil
}
logger.Infof("Atlas Cluster %s is not yet ready", atlasCluster.Name)
logger.Infof("Atlas Deployment %s is not yet ready", atlasDeployment.Name)
}
}
}
@@ -92,14 +92,14 @@
func main() {
logger := setupLogger()

clusterIsReady, err := isClusterReady(logger)
deploymentIsReady, err := isDeploymentReady(logger)
if err != nil {
logger.Error(err)
os.Exit(1)
}

exitCode := 1
if clusterIsReady {
if deploymentIsReady {
exitCode = 0
}
os.Exit(exitCode)