From f22f158d3eddfa9d718c146c09d4ab0dfa89cc00 Mon Sep 17 00:00:00 2001
From: Malte Sander
Date: Tue, 4 Nov 2025 11:33:47 +0100
Subject: [PATCH 1/3] revert/remove headless suffix from headless service

---
 CHANGELOG.md                                             | 5 ++---
 .../hdfs/examples/getting_started/getting_started.sh     | 6 +++---
 .../hdfs/examples/getting_started/getting_started.sh.j2  | 6 +++---
 rust/operator-binary/src/crd/mod.rs                      | 2 +-
 rust/operator-binary/src/hdfs_controller.rs              | 2 +-
 rust/operator-binary/src/service.rs                      | 2 +-
 tests/templates/kuttl/orphaned-resources/04-assert.yaml  | 2 +-
 tests/templates/kuttl/profiling/run-profiler.py          | 2 +-
 tests/templates/kuttl/smoke/30-assert.yaml.j2            | 6 +++---
 tests/templates/kuttl/smoke/test_jmx_metrics.py          | 4 ++--
 tests/templates/kuttl/smoke/webhdfs.py                   | 4 ++--
 11 files changed, 20 insertions(+), 21 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 808db9db..94ebebe6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,9 +15,8 @@ All notable changes to this project will be documented in this file.
 
 ### Changed
 
-- BREAKING: Renamed headless rolegroup service from `--` to `---headless` ([#721]).
-  - The `prometheus.io/scrape` label was moved to the metrics service
-  - The headless service now only exposes product / data ports, the metrics service only metrics ports
+- The `prometheus.io/scrape` label was moved to the metrics service ([#721]).
+- The headless service now only exposes product / data ports, the metrics service only metrics ports ([#721]).
 - Bump stackable-operator to `0.100.1` and product-config to `0.8.0` ([#722]).
 
 [#713]: https://github.com/stackabletech/hdfs-operator/pull/713
diff --git a/docs/modules/hdfs/examples/getting_started/getting_started.sh b/docs/modules/hdfs/examples/getting_started/getting_started.sh
index 0bd94982..5d530e27 100755
--- a/docs/modules/hdfs/examples/getting_started/getting_started.sh
+++ b/docs/modules/hdfs/examples/getting_started/getting_started.sh
@@ -116,7 +116,7 @@ kubectl rollout status --watch --timeout=5m statefulset/webhdfs
 
 file_status() {
   # tag::file-status[]
-  kubectl exec -n default webhdfs-0 -- curl -s -XGET "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default-headless.default.svc.cluster.local:9870/webhdfs/v1/?op=LISTSTATUS"
+  kubectl exec -n default webhdfs-0 -- curl -s -XGET "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local:9870/webhdfs/v1/?op=LISTSTATUS"
   # end::file-status[]
 }
 
@@ -138,7 +138,7 @@ kubectl cp -n default ./testdata.txt webhdfs-0:/tmp
 create_file() {
   # tag::create-file[]
   kubectl exec -n default webhdfs-0 -- \
-    curl -s -XPUT -T /tmp/testdata.txt "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default-headless.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE&noredirect=true"
+    curl -s -XPUT -T /tmp/testdata.txt "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE&noredirect=true"
   # end::create-file[]
 }
 
@@ -157,7 +157,7 @@ echo "Created file: $found_file with status $(file_status)"
 echo "Delete file"
 delete_file() {
   # tag::delete-file[]
-  kubectl exec -n default webhdfs-0 -- curl -s -XDELETE "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default-headless.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=DELETE"
+  kubectl exec -n default webhdfs-0 -- curl -s -XDELETE "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=DELETE"
   # end::delete-file[]
 }
 
diff --git a/docs/modules/hdfs/examples/getting_started/getting_started.sh.j2 b/docs/modules/hdfs/examples/getting_started/getting_started.sh.j2
index f1490c14..fe83e8d8 100755
--- a/docs/modules/hdfs/examples/getting_started/getting_started.sh.j2
+++ b/docs/modules/hdfs/examples/getting_started/getting_started.sh.j2
@@ -116,7 +116,7 @@ kubectl rollout status --watch --timeout=5m statefulset/webhdfs
 
 file_status() {
   # tag::file-status[]
-  kubectl exec -n default webhdfs-0 -- curl -s -XGET "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default-headless.default.svc.cluster.local:9870/webhdfs/v1/?op=LISTSTATUS"
+  kubectl exec -n default webhdfs-0 -- curl -s -XGET "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local:9870/webhdfs/v1/?op=LISTSTATUS"
   # end::file-status[]
 }
 
@@ -138,7 +138,7 @@ kubectl cp -n default ./testdata.txt webhdfs-0:/tmp
 create_file() {
   # tag::create-file[]
   kubectl exec -n default webhdfs-0 -- \
-    curl -s -XPUT -T /tmp/testdata.txt "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default-headless.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE&noredirect=true"
+    curl -s -XPUT -T /tmp/testdata.txt "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE&noredirect=true"
   # end::create-file[]
 }
 
@@ -157,7 +157,7 @@ echo "Created file: $found_file with status $(file_status)"
 echo "Delete file"
 delete_file() {
   # tag::delete-file[]
-  kubectl exec -n default webhdfs-0 -- curl -s -XDELETE "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default-headless.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=DELETE"
+  kubectl exec -n default webhdfs-0 -- curl -s -XDELETE "http://simple-hdfs-namenode-default-0.simple-hdfs-namenode-default.default.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=DELETE"
   # end::delete-file[]
 }
 
diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs
index 96cdb1fe..276a810a 100644
--- a/rust/operator-binary/src/crd/mod.rs
+++ b/rust/operator-binary/src/crd/mod.rs
@@ -391,7 +391,7 @@ impl v1alpha1::HdfsCluster {
                 let ns = ns.clone();
                 (0..*replicas).map(move |i| HdfsPodRef {
                     namespace: ns.clone(),
-                    role_group_service_name: rolegroup_ref.rolegroup_headless_service_name(),
+                    role_group_service_name: rolegroup_ref.object_name(),
                     pod_name: format!("{}-{}", rolegroup_ref.object_name(), i),
                     ports: self
                         .data_ports(role)
diff --git a/rust/operator-binary/src/hdfs_controller.rs b/rust/operator-binary/src/hdfs_controller.rs
index 2b7b1e07..c1340552 100644
--- a/rust/operator-binary/src/hdfs_controller.rs
+++ b/rust/operator-binary/src/hdfs_controller.rs
@@ -887,7 +887,7 @@ fn rolegroup_statefulset(
                 match_labels: Some(rolegroup_selector_labels.into()),
                 ..LabelSelector::default()
             },
-            service_name: Some(rolegroup_ref.rolegroup_headless_service_name()),
+            service_name: Some(rolegroup_ref.object_name()),
             template: pod_template,
 
             volume_claim_templates: Some(pvcs),
diff --git a/rust/operator-binary/src/service.rs b/rust/operator-binary/src/service.rs
index bac5cdca..64073eca 100644
--- a/rust/operator-binary/src/service.rs
+++ b/rust/operator-binary/src/service.rs
@@ -48,7 +48,7 @@ pub(crate) fn rolegroup_headless_service(
     let mut metadata_builder = ObjectMetaBuilder::new();
     metadata_builder
         .name_and_namespace(hdfs)
-        .name(rolegroup_ref.rolegroup_headless_service_name())
+        .name(rolegroup_ref.object_name())
         .ownerreference_from_resource(hdfs, None, Some(true))
         .with_context(|_| ObjectMissingMetadataForOwnerRefSnafu {
             obj_ref: ObjectRef::from_obj(hdfs),
diff --git a/tests/templates/kuttl/orphaned-resources/04-assert.yaml b/tests/templates/kuttl/orphaned-resources/04-assert.yaml
index 6bf19d60..2ec7d018 100644
--- a/tests/templates/kuttl/orphaned-resources/04-assert.yaml
+++ b/tests/templates/kuttl/orphaned-resources/04-assert.yaml
@@ -21,4 +21,4 @@ metadata:
 apiVersion: v1
 kind: Service
 metadata:
-  name: test-hdfs-datanode-newrolegroup-headless
+  name: test-hdfs-datanode-newrolegroup
diff --git a/tests/templates/kuttl/profiling/run-profiler.py b/tests/templates/kuttl/profiling/run-profiler.py
index ad46a3de..717fe8ef 100644
--- a/tests/templates/kuttl/profiling/run-profiler.py
+++ b/tests/templates/kuttl/profiling/run-profiler.py
@@ -55,7 +55,7 @@ def fetch_flamegraph(service_url, refresh_path):
 
 def test_profiling(role, port):
     service_url = (
-        f"http://test-hdfs-{role}-default-0.test-hdfs-{role}-default-headless:{port}"
+        f"http://test-hdfs-{role}-default-0.test-hdfs-{role}-default:{port}"
     )
 
     print(f"Test profiling on {service_url}")
diff --git a/tests/templates/kuttl/smoke/30-assert.yaml.j2 b/tests/templates/kuttl/smoke/30-assert.yaml.j2
index e685aa05..2f17591a 100644
--- a/tests/templates/kuttl/smoke/30-assert.yaml.j2
+++ b/tests/templates/kuttl/smoke/30-assert.yaml.j2
@@ -80,7 +80,7 @@ status:
 apiVersion: v1
 kind: Service
 metadata:
-  name: hdfs-namenode-default-headless
+  name: hdfs-namenode-default
 spec:
   ports:
     - name: rpc
@@ -110,7 +110,7 @@ spec:
 apiVersion: v1
 kind: Service
 metadata:
-  name: hdfs-datanode-default-headless
+  name: hdfs-datanode-default
 spec:
   ports:
     - name: data
@@ -144,7 +144,7 @@ spec:
 apiVersion: v1
 kind: Service
 metadata:
-  name: hdfs-journalnode-default-headless
+  name: hdfs-journalnode-default
 spec:
   ports:
     - name: rpc
diff --git a/tests/templates/kuttl/smoke/test_jmx_metrics.py b/tests/templates/kuttl/smoke/test_jmx_metrics.py
index f88437c4..9f508022 100755
--- a/tests/templates/kuttl/smoke/test_jmx_metrics.py
+++ b/tests/templates/kuttl/smoke/test_jmx_metrics.py
@@ -65,9 +65,9 @@ def check_datanode_metrics(
         # Kind "FSDatasetState"
         'hadoop_datanode_capacity{fsdatasetid=".+",kind="FSDatasetState",role="DataNode",service="HDFS"}',
         # Kind "DataNodeActivity" suffixed with "_info"
-        'hadoop_datanode_blocks_get_local_path_info_{host="hdfs-datanode-default-\\d+\\.hdfs-datanode-default-headless\\..+\\.svc\\.cluster\\.local",kind="DataNodeActivity",port="9866",role="DataNode",service="HDFS"}',
+        'hadoop_datanode_blocks_get_local_path_info_{host="hdfs-datanode-default-\\d+\\.hdfs-datanode-default\\..+\\.svc\\.cluster\\.local",kind="DataNodeActivity",port="9866",role="DataNode",service="HDFS"}',
         # Kind "DataNodeActivity"
-        'hadoop_datanode_blocks_read{host="hdfs-datanode-default-\\d+\\.hdfs-datanode-default-headless\\..+\\.svc\\.cluster\\.local",kind="DataNodeActivity",port="9866",role="DataNode",service="HDFS"}',
+        'hadoop_datanode_blocks_read{host="hdfs-datanode-default-\\d+\\.hdfs-datanode-default\\..+\\.svc\\.cluster\\.local",kind="DataNodeActivity",port="9866",role="DataNode",service="HDFS"}',
         # Counter suffixed with "_total"
         'hadoop_datanode_estimated_capacity_lost_total{kind="FSDatasetState",role="DataNode",service="HDFS"}',
         # Boolean metric
diff --git a/tests/templates/kuttl/smoke/webhdfs.py b/tests/templates/kuttl/smoke/webhdfs.py
index b0ccb40c..d7bb4c3f 100755
--- a/tests/templates/kuttl/smoke/webhdfs.py
+++ b/tests/templates/kuttl/smoke/webhdfs.py
@@ -17,7 +17,7 @@ def main() -> int:
 
     if command == "ls":
         http_code = requests.get(
-            f"http://hdfs-namenode-default-0.hdfs-namenode-default-headless.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=LISTSTATUS"
+            f"http://hdfs-namenode-default-0.hdfs-namenode-default.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=LISTSTATUS"
         ).status_code
         if http_code != 200:
             result = 1
@@ -31,7 +31,7 @@ def main() -> int:
             )
         }
         http_code = requests.put(
-            f"http://hdfs-namenode-default-0.hdfs-namenode-default-headless.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE",
+            f"http://hdfs-namenode-default-0.hdfs-namenode-default.{namespace}.svc.cluster.local:9870/webhdfs/v1/testdata.txt?user.name=stackable&op=CREATE",
             files=files,
             allow_redirects=True,
         ).status_code

From ac5a1494834e1202b90fe95721d1c8fa71570a3f Mon Sep 17 00:00:00 2001
From: Sebastian Bernauer
Date: Tue, 4 Nov 2025 13:03:10 +0100
Subject: [PATCH 2/3] changelog

---
 CHANGELOG.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 94ebebe6..954cb5fd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,13 +16,14 @@ All notable changes to this project will be documented in this file.
 ### Changed
 
 - The `prometheus.io/scrape` label was moved to the metrics service ([#721]).
-- The headless service now only exposes product / data ports, the metrics service only metrics ports ([#721]).
+- The headless service now only exposes product / data ports, the metrics service only metrics ports ([#721], [#726]).
 - Bump stackable-operator to `0.100.1` and product-config to `0.8.0` ([#722]).
 
 [#713]: https://github.com/stackabletech/hdfs-operator/pull/713
 [#718]: https://github.com/stackabletech/hdfs-operator/pull/718
 [#721]: https://github.com/stackabletech/hdfs-operator/pull/721
 [#722]: https://github.com/stackabletech/hdfs-operator/pull/722
+[#726]: https://github.com/stackabletech/hdfs-operator/pull/726
 
 ## [25.7.0] - 2025-07-23
 

From 0ebaf153c06880d6c4047329e0286982a1039806 Mon Sep 17 00:00:00 2001
From: Sebastian Bernauer
Date: Tue, 4 Nov 2025 13:04:45 +0100
Subject: [PATCH 3/3] python linter

---
 tests/templates/kuttl/profiling/run-profiler.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/tests/templates/kuttl/profiling/run-profiler.py b/tests/templates/kuttl/profiling/run-profiler.py
index 717fe8ef..39c8a30b 100644
--- a/tests/templates/kuttl/profiling/run-profiler.py
+++ b/tests/templates/kuttl/profiling/run-profiler.py
@@ -54,9 +54,7 @@ def fetch_flamegraph(service_url, refresh_path):
 
 
 def test_profiling(role, port):
-    service_url = (
-        f"http://test-hdfs-{role}-default-0.test-hdfs-{role}-default:{port}"
-    )
+    service_url = f"http://test-hdfs-{role}-default-0.test-hdfs-{role}-default:{port}"
 
     print(f"Test profiling on {service_url}")