21 changes: 21 additions & 0 deletions .github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,21 @@
Please ensure all items are complete before opening.

- [ ] Tick to sign off your agreement to the [Developer's Certificate of Origin](https://github.com/ibm-messaging/mq-helm/DCO1.1.txt)
- [ ] You have added README updates for any code changes
- [ ] You have completed the PR template below:

## What

What was changed

## How

How the change was implemented or reasoning behind it

## Testing

How to test that your changes work; not required for documentation changes.

## Issues

Links to the GitHub issue(s) (if present) that this pull request resolves.
2 changes: 2 additions & 0 deletions .gitignore
@@ -8,3 +8,5 @@ samples/AzureAKSMultiInstance/deploy/mtlsqm.yaml
samples/AzureAKSMultiInstance/test/ccdt_generated.json
samples/OpenShiftNativeHA/deploy/mtlsqm.yaml
samples/OpenShiftNativeHA/test/ccdt_generated.json
samples/AWSEKSCrossRegionReplication/deploy/mtlsqm.yaml
samples/AWSEKSCrossRegionReplication/test/ccdt_generated.json
2 changes: 1 addition & 1 deletion charts/ibm-mq/Chart.yaml
@@ -14,7 +14,7 @@
apiVersion: v2
name: ibm-mq
description: IBM MQ queue manager
version: 12.0.0
version: 12.0.1
type: application
appVersion: 9.4.2.0
kubeVersion: ">=1.18.0-0"
1 change: 1 addition & 0 deletions charts/ibm-mq/README.md
@@ -111,6 +111,7 @@ Alternatively, each parameter can be specified by using the `--set key=value[,ke
| `image.tag` | Image tag | `9.4.2.0-r1` |
| `image.pullPolicy` | Setting that controls when the kubelet attempts to pull the specified image. | `IfNotPresent` |
| `image.pullSecret` | An optional list of references to secrets in the same namespace to use for pulling any of the images used by this QueueManager. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honoured. For more information, see [here](https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod) | `nil` |
| `image.disableDefaultPullSecret` | By default, the ibm-entitlement-key is specified within the created service account. To remove it, set this property to `true`. | `false` |
| `credentials.enable` | Enable MQ to utilize credentials from a Secret for the default "app" and "admin" users. MQ no longer sets a default password for these users, so it is highly recommended to set your own by creating a Secret. | `false` |
| `credentials.secret` | Provide the name of a Secret that contains keys "mqAdminPassword" and "mqAppPassword" with passwords as their respective values. This Secret will be mounted into MQ. | `mq-credentials` |
| `metadata.labels` | The labels field serves as a pass-through for Pod labels. Users can add any label to this field and have it apply to the Pod. | `{}` |
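
For reference, a minimal sketch of creating the Secret these `credentials.*` parameters expect, using the default Secret name from the table (the release name and password values are placeholders):

```sh
# Create the Secret with the two keys the chart looks for.
kubectl create secret generic mq-credentials \
  --from-literal=mqAdminPassword='ChangeMeAdmin' \
  --from-literal=mqAppPassword='ChangeMeApp'

# Enable it at install time (release name "mymq" is a placeholder).
helm install mymq ./charts/ibm-mq \
  --set credentials.enable=true \
  --set credentials.secret=mq-credentials
```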
6 changes: 6 additions & 0 deletions charts/ibm-mq/templates/NOTES.txt
@@ -33,3 +33,9 @@ Get the NativeHA CRR Traffic URL to configure the replication address, by runnin
export HACRR_ROUTE=$(kubectl get route {{ include "ibm-mq.fullname" . }}-nhacrr -n {{ .Release.Namespace }} -o jsonpath="{.spec.host}")
echo $HACRR_ROUTE\(443\)
{{ end -}}

{{- if and .Values.route.loadBalancer.hacrrtraffic .Values.queueManager.nativeha.nativehaGroup }}
Get the NativeHA CRR traffic address to configure the replication address by running these commands:
export HACRR_ADDRESS=$(kubectl get services {{ include "ibm-mq.fullname" . }}-loadbalancer-hacrr -n {{ .Release.Namespace }} -o jsonpath="{..hostname}")$(kubectl get services {{ include "ibm-mq.fullname" . }}-loadbalancer-hacrr -n {{ .Release.Namespace }} -o jsonpath="{..ip}")
echo $HACRR_ADDRESS\(9415\)
{{ end -}}
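
This new block only renders when both `route.loadBalancer.hacrrtraffic` and `queueManager.nativeha.nativehaGroup` are set. A hedged sketch of an install that would trigger it (the release and group names are placeholders, and a real Native HA deployment needs the rest of the sample configuration as well):

```sh
helm install mymq ./charts/ibm-mq \
  --set queueManager.nativeha.nativehaGroup=qm-live \
  --set route.loadBalancer.hacrrtraffic=true
```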
20 changes: 10 additions & 10 deletions charts/ibm-mq/templates/service-ingress.yaml
@@ -1,4 +1,4 @@
# © Copyright IBM Corporation 2023
# © Copyright IBM Corporation 2023, 2025
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -30,16 +30,16 @@ spec:
service:
name: {{ include "ibm-mq.fullname" . }}
port:
number: 9443
name: console-https
ingressClassName: nginx
rules:
{{- if .Values.route.ingress.webconsole.hostname }}
- host: {{ .Values.route.ingress.webconsole.hostname }}
http:
{{- else }}
- http:
- http:
{{- end }}
paths:
paths:
{{- if .Values.route.ingress.webconsole.path }}
- path: {{ .Values.route.ingress.webconsole.path }}
{{- else }}
@@ -48,19 +48,19 @@
pathType: Prefix
backend:
service:
name: {{ include "ibm-mq.fullname" . }}-web
name: {{ include "ibm-mq.fullname" . }}
port:
number: 9443
name: console-https
{{- if .Values.route.ingress.webconsole.tls.enable }}
tls:
{{- if .Values.route.ingress.webconsole.hostname}}
{{- if .Values.route.ingress.webconsole.hostname }}
- hosts:
- {{ .Values.route.ingress.webconsole.hostname }}
{{- if .Values.route.ingress.webconsole.tls.secret}}
- {{ .Values.route.ingress.webconsole.hostname }}
{{- if .Values.route.ingress.webconsole.tls.secret }}
secretName: {{ .Values.route.ingress.webconsole.tls.secret }}
{{- end }}
{{- else }}
- secretName: {{ .Values.route.ingress.webconsole.tls.secret }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
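
As a hedged reference for the ingress values touched above (the hostname, path, and TLS secret names are placeholders; any additional values needed to enable the ingress in your environment are assumed):

```sh
helm install mymq ./charts/ibm-mq \
  --set route.ingress.webconsole.hostname=console.example.com \
  --set route.ingress.webconsole.path=/ \
  --set route.ingress.webconsole.tls.enable=true \
  --set route.ingress.webconsole.tls.secret=console-tls
```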
47 changes: 47 additions & 0 deletions charts/ibm-mq/templates/service-loadbalancer-hacrr.yaml
@@ -0,0 +1,47 @@
{{- if (.Values.route.loadBalancer.hacrrtraffic) }}
# © Copyright IBM Corporation 2025
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# A separate load balancer from the one used for MQ data and the web console
# is created here. On certain platforms, such as AWS EKS, combining all ports
# into a single load balancer does not work, because port 1414 is not open
# while the queue manager is in recovery mode. Therefore an individual load
# balancer is created for Native HA CRR.

apiVersion: v1
kind: Service
metadata:
name: {{ include "ibm-mq.fullname" . }}-loadbalancer-hacrr
labels:
{{- include "ibm-mq.labels" . | nindent 4 }}
# Additional annotations to be added to the load balancer annotations
{{- with .Values.route.loadBalancer.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: LoadBalancer
ports:
{{- if .Values.route.loadBalancer.hacrrtraffic }}
- port: 9415
name: ha-crr
{{- end }}
{{- if .Values.route.loadBalancer.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $group := .Values.route.loadBalancer.loadBalancerSourceRanges }}
- {{ $group -}}
{{ end }}
{{- end }}
selector:
{{- include "ibm-mq.selectorLabels" . | nindent 4 }}
{{- end }}
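
Once installed with `route.loadBalancer.hacrrtraffic=true`, the provisioned address can be checked directly. A sketch, assuming the release fullname renders to `secureapphelm-ibm-mq` as in the AWS sample and that the sample's default namespace is used:

```sh
kubectl get service secureapphelm-ibm-mq-loadbalancer-hacrr -n region1
# Combine the EXTERNAL-IP (a hostname on AWS) with port 9415 to form the
# replication address, i.e. <external-address>(9415).
```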
2 changes: 1 addition & 1 deletion charts/ibm-mq/templates/service.yaml
@@ -24,7 +24,7 @@ spec:
name: console-https
- port: 1414
name: qmgr
{{- if .Values.route.openShiftRoute.hacrrtraffic }}
{{- if .Values.queueManager.nativeha.nativehaGroup }}
- port: 9415
name: ha-crr
{{- end }}
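
With this change the `ha-crr` port is exposed on the main Service whenever a Native HA group is configured, rather than only when the OpenShift route for CRR traffic is enabled. A quick, hedged way to confirm the rendered ports (the service name assumes the AWS sample's release):

```sh
kubectl get service secureapphelm-ibm-mq -n region1 \
  -o jsonpath='{range .spec.ports[*]}{.name}{" "}{.port}{"\n"}{end}'
```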
8 changes: 7 additions & 1 deletion charts/ibm-mq/templates/serviceaccount.yaml
@@ -1,4 +1,4 @@
# © Copyright IBM Corporation 2021
# © Copyright IBM Corporation 2021, 2025
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,8 +17,14 @@ metadata:
name: {{ include "ibm-mq.fullname" ( . ) }}
labels:
{{- include "ibm-mq.labels" . | nindent 4 }}
{{- if not .Values.image.disableDefaultPullSecret }}
imagePullSecrets:
{{- else if .Values.queueManager.multiinstance.enable }}
imagePullSecrets:
{{- end }}
{{- if not .Values.image.disableDefaultPullSecret }}
- name: ibm-entitlement-key
{{- end }}
{{- if .Values.image.pullSecret }}
- name: {{ .Values.image.pullSecret }}
{{- end }}
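
Two hedged examples of how these values combine (the release and Secret names are placeholders):

```sh
# Keep the default ibm-entitlement-key and add an extra pull secret.
helm install mymq ./charts/ibm-mq \
  --set image.pullSecret=my-registry-secret

# Drop the default ibm-entitlement-key entirely.
helm install mymq ./charts/ibm-mq \
  --set image.disableDefaultPullSecret=true
```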
6 changes: 4 additions & 2 deletions charts/ibm-mq/values.yaml
@@ -24,6 +24,8 @@ image:
pullSecret:
# pullPolicy is either IfNotPresent or Always (https://kubernetes.io/docs/concepts/containers/images/)
pullPolicy: IfNotPresent
# disableDefaultPullSecret is set to true when pulling the image from a private registry and the ibm-entitlement-key is not required
disableDefaultPullSecret: false

# set passwords for users: "admin" and "app"
credentials:
@@ -40,7 +42,7 @@ metadata:
# persistence section specifies persistence settings which apply to the whole chart
persistence:

# dataPVC section specifies settings for the main Persistent Volume Claim, which is used for data in /var/mqm -> /mnt/mqm
# dataPVC section specifies settings for the main Persistent Volume Claim, which is used for data in /mnt/mqm-data
dataPVC:
# enable is whether to use this Persistent Volume or not
enable: false
@@ -62,7 +64,7 @@ persistence:
## storageClass to use for this PVCs
storageClassName: ""

# dataPVC section specifies settings for the main Persistent Volume Claim, which is used for data in /mnt/mqm-data
# qmPVC section specifies settings for the main Persistent Volume Claim, which is used for data in /mnt/mqm
qmPVC:
# enable is whether to use this Persistent Volume or not
enable: true
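
The corrected comments above make the mapping explicit: `dataPVC` backs `/mnt/mqm-data` and `qmPVC` backs `/mnt/mqm`. A hedged sketch of enabling both with an explicit storage class (the class name is a placeholder, and the `qmPVC.storageClassName` key is assumed to mirror the `dataPVC` one shown above):

```sh
helm install mymq ./charts/ibm-mq \
  --set persistence.dataPVC.enable=true \
  --set persistence.dataPVC.storageClassName=gp3 \
  --set persistence.qmPVC.enable=true \
  --set persistence.qmPVC.storageClassName=gp3
```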
42 changes: 42 additions & 0 deletions samples/AWSEKSCrossRegionReplication/README.md
@@ -0,0 +1,42 @@
# Deploying IBM MQ Native HA Cross Region Replication on AWS EKS
This sample uses [IBM MQ Native HA Cross Region Replication](https://www.ibm.com/docs/en/ibm-mq/9.4.0?topic=containers-native-ha-cross-region-replication) to set up a queue manager with high availability and disaster recovery across AWS regions. To reduce the infrastructure required, the cross-region setup is simulated by using two namespaces within the same AWS EKS cluster. If two AWS EKS clusters in separate regions are available, the reader can easily adapt the sample to that setup.

Within this sample we will deploy two Native HA deployments that are configured for replication. These two deployments work together to provide a single queue manager that is highly available within a region and resilient across regions. Once configured, a sample message will be put on the queue manager in region 1. The deployment in region 1 will then be removed and a switchover to region 2 completed. The message will then be received in region 2, showing that the data has been replicated.


## Pre-reqs
Prior to using the Helm chart you will need the following:
1. [Helm version 3](https://helm.sh/docs/intro/install/)
2. [Kubectl](https://kubernetes.io/docs/tasks/tools/)
3. [AWS Command Line](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html)
4. Ensure that your AWS EKS security group allows communication on the assigned NodePorts.


## Installation
1. Log into the AWS EKS cluster using the `aws` command line. If you are unsure how to do this please consult [here](https://aws.amazon.com/premiumsupport/knowledge-center/eks-cluster-connection/).
1. Change directories to *deploy*: `cd deploy`
1. Run the installation command to deploy an instance in the recovery namespace: `./installRegion2.sh <RecoveryNamespace>`
Where \<RecoveryNamespace\> is the Kubernetes namespace that the resources should be deployed into. By default a new namespace called `region2` is created. This will deploy a number of resources:
* The IBM MQ Helm Chart using the properties within the [secureapp_nativeha.yaml_template](deploy/secureapp_nativeha.yaml_template) file. The `installRegion2.sh` script sets the `nativehaGroup` parameter to `qm-recovery`
* A ConfigMap with MQ configuration to define a default queue and the required security.
* A secret that includes certificates and keys from the `genericresources/createcerts` directory, ensuring that MQ communication is secure.
1. This will take a minute or so to deploy, and the status can be checked with the following command: `kubectl get pods | grep secureapp`. Wait until one of the three Pods shows `1/1` under the READY status (only one will ever show this; the remaining two will be `0/1`, showing that they are replicas).
1. Run the installation command to deploy an instance in the live namespace: `./installRegion1.sh <LiveNamespace> <RecoveryNamespace>`
Where \<LiveNamespace\> is the Kubernetes namespace that the live resources should be deployed into. By default a new namespace called `region1` is created. \<RecoveryNamespace\> is the Kubernetes namespace where the recovery resources have already been deployed. This will deploy a number of resources:
* The IBM MQ Helm Chart using the properties within the [secureapp_nativeha.yaml_template](deploy/secureapp_nativeha.yaml_template) file. The `installRegion1.sh` script sets the `nativehaGroup` parameter to `qm-live`, and the `address` parameter to the corresponding location for region2.
* A ConfigMap with MQ configuration to define a default queue and the required security.
* A secret that includes certificates and keys from the `genericresources/createcerts` directory, ensuring that MQ communication is secure.
1. This will take a minute or so to deploy, and the status can be checked with the following command: `kubectl get pods | grep secureapp`. Wait until one of the three Pods shows `1/1` under the READY status (only one will ever show this; the remaining two will be `0/1`, showing that they are replicas). An illustrative readiness check is sketched after this list.
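
An illustrative readiness check (the namespace is the sample default; the pod names follow from the release name used by the scripts, and the exact output columns will vary):

```sh
kubectl get pods -n region2 | grep secureapp
# secureapphelm-ibm-mq-0   1/1   Running   ...   <- active instance
# secureapphelm-ibm-mq-1   0/1   Running   ...   <- replica
# secureapphelm-ibm-mq-2   0/1   Running   ...   <- replica
```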

## Testing
The prerequisite is that IBM MQ is installed under the `/opt/mqm` directory, or that the binaries (Redistributable client) are available in the same path, on the host machine where the testing is carried out.

Navigate to the *../test* directory. No modifications should be required, as the endpoint configuration for your environment will be discovered automatically.

1. To initiate the testing, run the **./sendMessageRegion1.sh \<LiveNamespace\>** command. It will then connect to MQ. Type in a message such as `Message from Region1` and press enter twice.

1. Run the **./deleteRegion1SwitchToRegion2.sh \<LiveNamespace\> \<RecoveryNamespace\>** command. You should see all of the messages that were sent by the sendMessageRegion1.sh command.

1. Run **./getMessageRegion2.sh \<RecoveryNamespace\>** to retrieve the original message.

1. You can clean up the resources by navigating to the *../deploy* directory and running the command **./cleanup.sh \<LiveNamespace\> \<RecoveryNamespace\>**. This will delete everything. Do not worry if you receive messages about PVCs not being found; this is a generic clean-up script and assumes a worst-case scenario.
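
Using the default namespace names, the end-to-end flow above amounts to the following sketch (run each step from the directory noted in the instructions):

```sh
cd deploy
./installRegion2.sh region2                        # recovery deployment
./installRegion1.sh region1 region2                # live deployment, replicating to region2

cd ../test
./sendMessageRegion1.sh region1                    # put a test message in region 1
./deleteRegion1SwitchToRegion2.sh region1 region2  # remove region 1 and switch over
./getMessageRegion2.sh region2                     # receive the message in region 2

cd ../deploy
./cleanup.sh region1 region2                       # remove everything
```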
49 changes: 49 additions & 0 deletions samples/AWSEKSCrossRegionReplication/deploy/cleanup.sh
@@ -0,0 +1,49 @@
#! /bin/bash
# © Copyright IBM Corporation 2025
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

export LIVE_TARGET_NAMESPACE=${1:-"region1"}
export RECOVERY_TARGET_NAMESPACE=${2:-"region2"}

kubectl config set-context --current --namespace=$LIVE_TARGET_NAMESPACE
helm delete secureapphelm
kubectl delete secret helmsecure -n $LIVE_TARGET_NAMESPACE
kubectl delete secret nha-crr-secret-live -n $LIVE_TARGET_NAMESPACE
kubectl delete secret nha-crr-secret-recovery -n $LIVE_TARGET_NAMESPACE
kubectl delete configmap helmsecure -n $LIVE_TARGET_NAMESPACE
kubectl delete pvc data-secureapphelm-ibm-mq-0 -n $LIVE_TARGET_NAMESPACE
kubectl delete pvc data-secureapphelm-ibm-mq-1 -n $LIVE_TARGET_NAMESPACE
kubectl delete pvc data-secureapphelm-ibm-mq-2 -n $LIVE_TARGET_NAMESPACE
kubectl delete pvc log-secureapphelm-ibm-mq-0 -n $LIVE_TARGET_NAMESPACE
kubectl delete pvc log-secureapphelm-ibm-mq-1 -n $LIVE_TARGET_NAMESPACE
kubectl delete pvc log-secureapphelm-ibm-mq-2 -n $LIVE_TARGET_NAMESPACE
kubectl delete pvc qm-secureapphelm-ibm-mq-0 -n $LIVE_TARGET_NAMESPACE
kubectl delete pvc qm-secureapphelm-ibm-mq-1 -n $LIVE_TARGET_NAMESPACE
kubectl delete pvc qm-secureapphelm-ibm-mq-2 -n $LIVE_TARGET_NAMESPACE

kubectl config set-context --current --namespace=$RECOVERY_TARGET_NAMESPACE
helm delete secureapphelm
kubectl delete secret helmsecure -n $RECOVERY_TARGET_NAMESPACE
kubectl delete secret nha-crr-secret-recovery -n $RECOVERY_TARGET_NAMESPACE
kubectl delete secret nha-crr-secret-live -n $RECOVERY_TARGET_NAMESPACE
kubectl delete configmap helmsecure -n $RECOVERY_TARGET_NAMESPACE
kubectl delete pvc data-secureapphelm-ibm-mq-0 -n $RECOVERY_TARGET_NAMESPACE
kubectl delete pvc data-secureapphelm-ibm-mq-1 -n $RECOVERY_TARGET_NAMESPACE
kubectl delete pvc data-secureapphelm-ibm-mq-2 -n $RECOVERY_TARGET_NAMESPACE
kubectl delete pvc log-secureapphelm-ibm-mq-0 -n $RECOVERY_TARGET_NAMESPACE
kubectl delete pvc log-secureapphelm-ibm-mq-1 -n $RECOVERY_TARGET_NAMESPACE
kubectl delete pvc log-secureapphelm-ibm-mq-2 -n $RECOVERY_TARGET_NAMESPACE
kubectl delete pvc qm-secureapphelm-ibm-mq-0 -n $RECOVERY_TARGET_NAMESPACE
kubectl delete pvc qm-secureapphelm-ibm-mq-1 -n $RECOVERY_TARGET_NAMESPACE
kubectl delete pvc qm-secureapphelm-ibm-mq-2 -n $RECOVERY_TARGET_NAMESPACE
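
Both arguments are optional thanks to the defaults at the top of the script; a hedged usage example:

```sh
./cleanup.sh                    # uses the defaults: region1 and region2
./cleanup.sh myLive myRecovery  # explicit namespaces (placeholder names)
```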