diff --git a/.github/workflows/ci_chart_test.yml b/.github/workflows/ci_chart_test.yml
index 6a4fa755677..7ce9f52d2db 100644
--- a/.github/workflows/ci_chart_test.yml
+++ b/.github/workflows/ci_chart_test.yml
@@ -47,8 +47,6 @@ jobs:
strategy:
matrix:
kubernetes-version:
- - 'kindest/node:v1.14.10'
- - 'kindest/node:v1.18.20'
- 'kindest/node:v1.21.10'
- 'kindest/node:v1.23.4'
steps:
diff --git a/docker/docker-compose/docker-compose.yml b/docker/docker-compose/docker-compose.yml
index 102ff27504e..9365b15bbc3 100644
--- a/docker/docker-compose/docker-compose.yml
+++ b/docker/docker-compose/docker-compose.yml
@@ -39,6 +39,7 @@ services:
container_name: pulsar
ports:
- "6650:6650"
+ - "8080:8080"
volumes:
- ./pulsar/data:/pulsar/data
command: bin/pulsar standalone
diff --git a/docker/kubernetes/Chart.yaml b/docker/kubernetes/Chart.yaml
index a8af351e23f..f6672e53191 100644
--- a/docker/kubernetes/Chart.yaml
+++ b/docker/kubernetes/Chart.yaml
@@ -29,11 +29,12 @@ keywords:
# The chart version number should be incremented each time you make changes
# to the chart and its templates.
-version: 2.0.2
+# x.y.z format, where x.y should match the InLong version
+version: 1.6.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
-appVersion: 1.2.0
+appVersion: 1.6.0-SNAPSHOT
maintainers:
- name: dockerzhang
diff --git a/docker/kubernetes/templates/NOTES.txt b/docker/kubernetes/templates/NOTES.txt
index bbbe0b08c25..af3ba947b62 100644
--- a/docker/kubernetes/templates/NOTES.txt
+++ b/docker/kubernetes/templates/NOTES.txt
@@ -27,23 +27,23 @@
{{- else if eq .Values.dashboard.service.type "ClusterIP" }}
- $ export DASHBOARD_POD_NAME=$(sudo kubectl get pods -l "app.kubernetes.io/name={{ template "inlong.name" . }}-{{ .Values.dashboard.component }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}" -n {{ .Release.Namespace }})
- $ export DASHBOARD_CONTAINER_PORT=$(sudo kubectl get pod $DASHBOARD_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}" -n {{ .Release.Namespace }})
- $ sudo kubectl port-forward $DASHBOARD_POD_NAME 8181:$DASHBOARD_CONTAINER_PORT -n {{ .Release.Namespace }}
+ $ export DASHBOARD_POD_NAME=$(kubectl get pods -l "app.kubernetes.io/name={{ template "inlong.name" . }}-{{ .Values.dashboard.component }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}" -n {{ .Release.Namespace }})
+ $ export DASHBOARD_CONTAINER_PORT=$(kubectl get pod $DASHBOARD_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}" -n {{ .Release.Namespace }})
+ $ kubectl port-forward $DASHBOARD_POD_NAME 8181:$DASHBOARD_CONTAINER_PORT -n {{ .Release.Namespace }}
$ echo "InLong Dashboard URL: http://127.0.0.1:8181"
{{- else if eq .Values.dashboard.service.type "NodePort" }}
- $ export DASHBOARD_NODE_IP=$(sudo kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n {{ .Release.Namespace }})
- $ export DASHBOARD_NODE_PORT=$(sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dashboard.component }} -o jsonpath="{.spec.ports[0].nodePort}" -n {{ .Release.Namespace }})
+ $ export DASHBOARD_NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n {{ .Release.Namespace }})
+ $ export DASHBOARD_NODE_PORT=$(kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dashboard.component }} -o jsonpath="{.spec.ports[0].nodePort}" -n {{ .Release.Namespace }})
$ echo "InLong Dashboard URL: http://$DASHBOARD_NODE_IP:$DASHBOARD_NODE_PORT"
{{- else if eq .Values.dashboard.service.type "LoadBalancer" }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
- You can check the status by running 'sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dashboard.component }} -n {{ .Release.Namespace }} -w'
+ You can check the status by running 'kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dashboard.component }} -n {{ .Release.Namespace }} -w'
- $ export DASHBOARD_SERVICE_IP=$(sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dashboard.component }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}" -n {{ .Release.Namespace }})
+ $ export DASHBOARD_SERVICE_IP=$(kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dashboard.component }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}" -n {{ .Release.Namespace }})
$ echo "http://$DASHBOARD_SERVICE_IP:{{ .Values.dashboard.service.nodePort }}"
{{- end }}
@@ -56,23 +56,23 @@
{{- else if eq .Values.manager.service.type "ClusterIP" }}
- $ export MANAGER_POD_NAME=$(sudo kubectl get pods -l "app.kubernetes.io/name={{ template "inlong.name" . }}-{{ .Values.manager.component }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}" -n {{ .Release.Namespace }})
- $ export MANAGER_CONTAINER_PORT=$(sudo kubectl get pod $MANAGER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}" -n {{ .Release.Namespace }})
- $ sudo kubectl port-forward $MANAGER_POD_NAME 8182:$MANAGER_CONTAINER_PORT -n {{ .Release.Namespace }}
+ $ export MANAGER_POD_NAME=$(kubectl get pods -l "app.kubernetes.io/name={{ template "inlong.name" . }}-{{ .Values.manager.component }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}" -n {{ .Release.Namespace }})
+ $ export MANAGER_CONTAINER_PORT=$(kubectl get pod $MANAGER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}" -n {{ .Release.Namespace }})
+ $ kubectl port-forward $MANAGER_POD_NAME 8182:$MANAGER_CONTAINER_PORT -n {{ .Release.Namespace }}
$ echo "InLong Manager URL: http://127.0.0.1:8182"
{{- else if eq .Values.manager.service.type "NodePort" }}
- $ export MANAGER_NODE_IP=$(sudo kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n {{ .Release.Namespace }})
- $ export MANAGER_NODE_PORT=$(sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.manager.component }} -o jsonpath="{.spec.ports[0].nodePort}" -n {{ .Release.Namespace }})
+ $ export MANAGER_NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n {{ .Release.Namespace }})
+ $ export MANAGER_NODE_PORT=$(kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.manager.component }} -o jsonpath="{.spec.ports[0].nodePort}" -n {{ .Release.Namespace }})
$ echo "InLong Manager URL: http://$MANAGER_NODE_IP:$MANAGER_NODE_PORT"
{{- else if eq .Values.manager.service.type "LoadBalancer" }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
- You can check the status by running 'sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.manager.component }} -n {{ .Release.Namespace }} -w'
+ You can check the status by running 'kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.manager.component }} -n {{ .Release.Namespace }} -w'
- $ export MANAGER_SERVICE_IP=$(sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.manager.component }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}" -n {{ .Release.Namespace }})
+ $ export MANAGER_SERVICE_IP=$(kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.manager.component }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}" -n {{ .Release.Namespace }})
$ echo "InLong Manager URL: http://$MANAGER_SERVICE_IP:{{ .Values.manager.service.nodePort }}"
{{- end }}
@@ -85,23 +85,23 @@
{{- else if eq .Values.dataproxy.service.type "ClusterIP" }}
- $ export DATA_PROXY_POD_NAME=$(sudo kubectl get pods -l "app.kubernetes.io/name={{ template "inlong.name" . }}-{{ .Values.dataproxy.component }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}" -n {{ .Release.Namespace }})
- $ export DATA_PROXY_CONTAINER_PORT=$(sudo kubectl get pod $DATA_PROXY_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}" -n {{ .Release.Namespace }})
- $ sudo kubectl port-forward $DATA_PROXY_POD_NAME 8183:$DATA_PROXY_CONTAINER_PORT -n {{ .Release.Namespace }}
+ $ export DATA_PROXY_POD_NAME=$(kubectl get pods -l "app.kubernetes.io/name={{ template "inlong.name" . }}-{{ .Values.dataproxy.component }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}" -n {{ .Release.Namespace }})
+ $ export DATA_PROXY_CONTAINER_PORT=$(kubectl get pod $DATA_PROXY_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}" -n {{ .Release.Namespace }})
+ $ kubectl port-forward $DATA_PROXY_POD_NAME 8183:$DATA_PROXY_CONTAINER_PORT -n {{ .Release.Namespace }}
$ echo "InLong DataProxy URL: http://127.0.0.1:8183"
{{- else if eq .Values.dataproxy.service.type "NodePort" }}
- $ export DATA_PROXY_NODE_IP=$(sudo kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n {{ .Release.Namespace }})
- $ export DATA_PROXY_NODE_PORT=$(sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dataproxy.component }} -o jsonpath="{.spec.ports[0].nodePort}" -n {{ .Release.Namespace }})
+ $ export DATA_PROXY_NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n {{ .Release.Namespace }})
+ $ export DATA_PROXY_NODE_PORT=$(kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dataproxy.component }} -o jsonpath="{.spec.ports[0].nodePort}" -n {{ .Release.Namespace }})
$ echo "InLong DataProxy URL: http://$DATA_PROXY_NODE_IP:$DATA_PROXY_NODE_PORT"
{{- else if eq .Values.dataproxy.service.type "LoadBalancer" }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
- You can check the status by running 'sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dataproxy.component }} -n {{ .Release.Namespace }} -w'
+ You can check the status by running 'kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dataproxy.component }} -n {{ .Release.Namespace }} -w'
- $ export DATA_PROXY_SERVICE_IP=$(sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dataproxy.component }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}" -n {{ .Release.Namespace }})
+ $ export DATA_PROXY_SERVICE_IP=$(kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.dataproxy.component }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}" -n {{ .Release.Namespace }})
$ echo "InLong DataProxy URL: http://$DATA_PROXY_SERVICE_IP:{{ .Values.dataproxy.service.nodePort }}"
{{- end }}
@@ -114,23 +114,23 @@
{{- else if eq .Values.tubemqMaster.service.type "ClusterIP" }}
- $ export TUBEMQ_MASTER_POD_NAME=$(sudo kubectl get pods -l "app.kubernetes.io/name={{ template "inlong.name" . }}-{{ .Values.tubemqMaster.component }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}" -n {{ .Release.Namespace }})
- $ export TUBEMQ_MASTER_CONTAINER_PORT=$(sudo kubectl get pod $TUBEMQ_MASTER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}" -n {{ .Release.Namespace }})
- $ sudo kubectl port-forward $TUBEMQ_MASTER_POD_NAME 8183:$TUBEMQ_MASTER_CONTAINER_PORT -n {{ .Release.Namespace }}
+ $ export TUBEMQ_MASTER_POD_NAME=$(kubectl get pods -l "app.kubernetes.io/name={{ template "inlong.name" . }}-{{ .Values.tubemqMaster.component }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}" -n {{ .Release.Namespace }})
+ $ export TUBEMQ_MASTER_CONTAINER_PORT=$(kubectl get pod $TUBEMQ_MASTER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}" -n {{ .Release.Namespace }})
+ $ kubectl port-forward $TUBEMQ_MASTER_POD_NAME 8183:$TUBEMQ_MASTER_CONTAINER_PORT -n {{ .Release.Namespace }}
$ echo "InLong TubeMQ Master URL: http://127.0.0.1:8183"
{{- else if eq .Values.tubemqMaster.service.type "NodePort" }}
- $ export TUBEMQ_MASTER_NODE_IP=$(sudo kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n {{ .Release.Namespace }})
- $ export TUBEMQ_MASTER_NODE_PORT=$(sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqMaster.component }} -o jsonpath="{.spec.ports[0].nodePort}" -n {{ .Release.Namespace }})
+ $ export TUBEMQ_MASTER_NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n {{ .Release.Namespace }})
+ $ export TUBEMQ_MASTER_NODE_PORT=$(kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqMaster.component }} -o jsonpath="{.spec.ports[0].nodePort}" -n {{ .Release.Namespace }})
$ echo "InLong TubeMQ Master URL: http://$TUBEMQ_MASTER_NODE_IP:$TUBEMQ_MASTER_NODE_PORT"
{{- else if eq .Values.tubemqMaster.service.type "LoadBalancer" }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
- You can check the status by running 'sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqMaster.component }} -n {{ .Release.Namespace }} -w'
+ You can check the status by running 'kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqMaster.component }} -n {{ .Release.Namespace }} -w'
- $ export TUBEMQ_MASTER_SERVICE_IP=$(sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqMaster.component }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}" -n {{ .Release.Namespace }})
+ $ export TUBEMQ_MASTER_SERVICE_IP=$(kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqMaster.component }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}" -n {{ .Release.Namespace }})
$ echo "InLong TubeMQ Master URL: http://$TUBEMQ_MASTER_SERVICE_IP:{{ .Values.tubemqMaster.service.webNodePort }}"
{{- end }}
@@ -143,38 +143,38 @@
{{- else if eq .Values.tubemqBroker.service.type "ClusterIP" }}
- $ export TUBEMQ_BROKER_POD_NAME=$(sudo kubectl get pods -l "app.kubernetes.io/name={{ template "inlong.name" . }}-{{ .Values.tubemqBroker.component }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}" -n {{ .Release.Namespace }})
- $ export TUBEMQ_BROKER_CONTAINER_PORT=$(sudo kubectl get pod $TUBEMQ_BROKER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}" -n {{ .Release.Namespace }})
- $ sudo kubectl port-forward $TUBEMQ_BROKER_POD_NAME 8183:$TUBEMQ_BROKER_CONTAINER_PORT -n {{ .Release.Namespace }}
+ $ export TUBEMQ_BROKER_POD_NAME=$(kubectl get pods -l "app.kubernetes.io/name={{ template "inlong.name" . }}-{{ .Values.tubemqBroker.component }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}" -n {{ .Release.Namespace }})
+ $ export TUBEMQ_BROKER_CONTAINER_PORT=$(kubectl get pod $TUBEMQ_BROKER_POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}" -n {{ .Release.Namespace }})
+ $ kubectl port-forward $TUBEMQ_BROKER_POD_NAME 8183:$TUBEMQ_BROKER_CONTAINER_PORT -n {{ .Release.Namespace }}
$ echo "InLong TubeMQ Broker URL: http://127.0.0.1:8183"
{{- else if eq .Values.tubemqBroker.service.type "NodePort" }}
- $ export TUBEMQ_BROKER_NODE_IP=$(sudo kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n {{ .Release.Namespace }})
- $ export TUBEMQ_BROKER_NODE_PORT=$(sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqBroker.component }} -o jsonpath="{.spec.ports[0].nodePort}" -n {{ .Release.Namespace }})
+ $ export TUBEMQ_BROKER_NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}" -n {{ .Release.Namespace }})
+ $ export TUBEMQ_BROKER_NODE_PORT=$(kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqBroker.component }} -o jsonpath="{.spec.ports[0].nodePort}" -n {{ .Release.Namespace }})
$ echo "InLong TubeMQ Broker URL: http://$TUBEMQ_BROKER_NODE_IP:$TUBEMQ_BROKER_NODE_PORT"
{{- else if eq .Values.tubemqBroker.service.type "LoadBalancer" }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
- You can check the status by running 'sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqBroker.component }} -n {{ .Release.Namespace }} -w'
+ You can check the status by running 'kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqBroker.component }} -n {{ .Release.Namespace }} -w'
- $ export TUBEMQ_BROKER_SERVICE_IP=$(sudo kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqBroker.component }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}" -n {{ .Release.Namespace }})
+ $ export TUBEMQ_BROKER_SERVICE_IP=$(kubectl get svc {{ template "inlong.fullname" . }}-{{ .Values.tubemqBroker.component }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}" -n {{ .Release.Namespace }})
$ echo "InLong TubeMQ Broker URL: http://$TUBEMQ_BROKER_SERVICE_IP:{{ .Values.tubemqBroker.service.webNodePort }}"
{{- end }}
To learn more about the release, try:
- $ sudo helm status {{ .Release.Name }} -n {{ .Release.Namespace }}
- $ sudo helm get all {{ .Release.Name }} -n {{ .Release.Namespace }}
+ $ helm status {{ .Release.Name }} -n {{ .Release.Namespace }}
+ $ helm get all {{ .Release.Name }} -n {{ .Release.Namespace }}
To uninstall the release, try:
- $ sudo helm uninstall {{ .Release.Name }} -n {{ .Release.Namespace }}
+ $ helm uninstall {{ .Release.Name }} -n {{ .Release.Namespace }}
To delete all PVC if any persistent volume claims used, try:
- $ sudo kubectl delete pvc -n {{ .Release.Namespace }} --all
+ $ kubectl delete pvc -n {{ .Release.Namespace }} --all
For more details, please check out https://inlong.apache.org/docs/next/deployment/k8s
diff --git a/docker/kubernetes/templates/agent-statefulset.yaml b/docker/kubernetes/templates/agent-statefulset.yaml
index 70864e65d42..8dadd8d2604 100644
--- a/docker/kubernetes/templates/agent-statefulset.yaml
+++ b/docker/kubernetes/templates/agent-statefulset.yaml
@@ -58,18 +58,18 @@ spec:
{{- end }}
terminationGracePeriodSeconds: {{ .Values.agent.terminationGracePeriodSeconds }}
initContainers:
- - name: wait-{{ .Values.dashboard.component }}-ready
+ - name: wait-{{ .Values.manager.component }}-ready
image: {{ .Values.images.initContainer.repository }}:{{ .Values.images.initContainer.tag }}
imagePullPolicy: {{ .Values.images.pullPolicy }}
command: [ "/bin/sh", "-c" ]
args:
- |
- count={{ .Values.dashboard.replicas }}
+ count={{ .Values.manager.replicas }}
for i in $(seq 0 $(expr $count - 1))
do
- replica="{{ template "inlong.fullname" . }}-{{ .Values.dashboard.component }}-$i"
- host="$replica.{{ template "inlong.dashboard.hostname" . }}"
- port={{ .Values.dashboard.port }}
+ replica="{{ template "inlong.fullname" . }}-{{ .Values.manager.component }}-$i"
+ host="$replica.{{ template "inlong.manager.hostname" . }}"
+ port={{ .Values.manager.port }}
until nc -z $host $port 2>/dev/null
do
echo "waiting for $replica to be ready"
diff --git a/docker/kubernetes/templates/zookeeper-configmap.yaml b/docker/kubernetes/templates/zookeeper-configmap.yaml
index 8de0a388f53..0452a502904 100644
--- a/docker/kubernetes/templates/zookeeper-configmap.yaml
+++ b/docker/kubernetes/templates/zookeeper-configmap.yaml
@@ -50,6 +50,5 @@ data:
export ZOO_MY_ID=$MY_POD_ID
export ZOO_DATA_DIR="/data/zoo_data/"
export ZOO_DATA_LOG_DIR="/data/zoo_log/"
- export SERVER_JVMFLAGS="-Xmx1g -Xms1g"
/docker-entrypoint.sh zkServer.sh start-foreground
{{- end }}
diff --git a/inlong-audit/audit-common/pom.xml b/inlong-audit/audit-common/pom.xml
index da8c62fd24a..4e150143e11 100644
--- a/inlong-audit/audit-common/pom.xml
+++ b/inlong-audit/audit-common/pom.xml
@@ -26,7 +26,7 @@
1.6.0-SNAPSHOT
audit-common
- Apache InLong - Audit common
+ Apache InLong - Audit Common
diff --git a/inlong-audit/audit-sdk/pom.xml b/inlong-audit/audit-sdk/pom.xml
index 71b467e63bb..66e3e57b2b1 100644
--- a/inlong-audit/audit-sdk/pom.xml
+++ b/inlong-audit/audit-sdk/pom.xml
@@ -27,7 +27,7 @@
audit-sdk
jar
- Apache InLong - Audit Sdk
+ Apache InLong - Audit SDK