diff --git a/CHANGELOG.md b/CHANGELOG.md index b9ff1eebe..640cecd2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ All notable changes to this project will be documented in this file. - hadoop: Add `3.4.2` ([#1291]). - zookeeper: Add `3.9.4` ([#1292]). - nifi: Add `2.6.0` ([#1293]). +- hbase: Add `2.6.3` ([#1296]). ### Changed @@ -55,6 +56,7 @@ All notable changes to this project will be documented in this file. - spark: Remove `4.0.0` ([#1286]). - spark-connect-client: Remove `4.0.0` ([#1286]). - trino/trino-storage-conector: Remove `470` ([#1285]). +- hbase: Remove `2.6.1` ([#1296]). [#1207]: https://github.com/stackabletech/docker-images/pull/1207 [#1215]: https://github.com/stackabletech/docker-images/pull/1215 @@ -84,6 +86,7 @@ All notable changes to this project will be documented in this file. [#1291]: https://github.com/stackabletech/docker-images/pull/1291 [#1292]: https://github.com/stackabletech/docker-images/pull/1292 [#1293]: https://github.com/stackabletech/docker-images/pull/1293 +[#1296]: https://github.com/stackabletech/docker-images/pull/1296 ## [25.7.0] - 2025-07-23 diff --git a/hbase/README.md b/hbase/README.md index fcdd00ae3..ed78e2e1f 100644 --- a/hbase/README.md +++ b/hbase/README.md @@ -1,6 +1,6 @@ # Support for HBase 2.6 -As of SDP 24.7 we do include HBase 2.6 support in an experimental state. +As of SDP release 25.3, HBase 2.6.x is fully supported. ## Phoenix @@ -11,16 +11,4 @@ SDP 24.7 included Phoenix built from the master branch from commit [4afe457](htt Repository: [hbase-operator-tools](https://github.com/apache/hbase-operator-tools) -Built from git hash [7c738fc](https://github.com/apache/hbase-operator-tools/tree/7c738fc1bd14fd3e2ca4e66569b496b3fd9d0288) (master) -since no release supporting HBase 2.6 available yet. - -```bash -mkdir ../hbase-operator-tools-1.3.0-7c738fc -git archive --format=tar --output ../hbase-operator-tools-1.3.0-7c738fc/hot.tar 7c738fc -cd ../hbase-operator-tools-1.3.0-7c738fc -tar xf hot.tar -rm hot.tar -echo 7c738fc > git-commit -cd .. -tar -c hbase-operator-tools-1.3.0-7c738fc|gzip > hbase-operator-tools-1.3.0-7c738fc-src.tar.gz -``` +This is now mirrored and built from source using `patchable`. 
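For context on the `patchable` workflow the README now refers to: the new `hbase/hbase/stackable/patches/2.6.3/patchable.toml` added later in this diff pins the mirror URL and base commit that the 2.6.3 patch series applies to, and `hbase/hbase-operator-tools` presumably follows the same layout. A rough plain-git sketch of what that checkout amounts to (the exact `patchable` CLI invocation and the relative path to the docker-images checkout are assumptions, not taken from this diff):

```bash
# Sketch only: rebuild the patched hbase/hbase 2.6.3 source tree by hand.
# Mirror URL and base commit come from hbase/hbase/stackable/patches/2.6.3/patchable.toml;
# the ../docker-images path is an assumed location of this repository's checkout.
git clone https://github.com/stackabletech/hbase.git hbase-mirror
cd hbase-mirror
git checkout 01ac2f51f3bb4cee623ea7dced75bfccbb6b562e
# The patches are mbox-format (git format-patch output), so git am applies them as commits.
git am ../docker-images/hbase/hbase/stackable/patches/2.6.3/*.patch
```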
diff --git a/hbase/boil-config.toml b/hbase/boil-config.toml index 2aa8bd793..8b62185cc 100644 --- a/hbase/boil-config.toml +++ b/hbase/boil-config.toml @@ -1,25 +1,25 @@ -[versions."2.6.1".local-images] -"hbase/hbase" = "2.6.1" -"hbase/hbase-operator-tools" = "1.3.0-fd5a5fb-hbase2.6.1" -"hbase/phoenix" = "5.2.1-hbase2.6.1" +[versions."2.6.2".local-images] +"hbase/hbase" = "2.6.2" +"hbase/hbase-operator-tools" = "1.3.0-fd5a5fb-hbase2.6.2" +"hbase/phoenix" = "5.2.1-hbase2.6.2" "hbase/hbase-opa-authorizer" = "0.1.0" # only for HBase 2.6.1 -"hadoop/hadoop" = "3.3.6" +"hadoop/hadoop" = "3.4.1" java-base = "11" java-devel = "11" -[versions."2.6.1".build-arguments] +[versions."2.6.2".build-arguments] hbase-profile = "2.6" delete-caches = "true" -[versions."2.6.2".local-images] -"hbase/hbase" = "2.6.2" -"hbase/hbase-operator-tools" = "1.3.0-fd5a5fb-hbase2.6.2" -"hbase/phoenix" = "5.2.1-hbase2.6.2" +[versions."2.6.3".local-images] +"hbase/hbase" = "2.6.3" +"hbase/hbase-operator-tools" = "1.3.0-fd5a5fb-hbase2.6.3" +"hbase/phoenix" = "5.2.1-hbase2.6.3" "hbase/hbase-opa-authorizer" = "0.1.0" # only for HBase 2.6.1 -"hadoop/hadoop" = "3.4.1" +"hadoop/hadoop" = "3.4.2" java-base = "11" java-devel = "11" -[versions."2.6.2".build-arguments] +[versions."2.6.3".build-arguments] hbase-profile = "2.6" delete-caches = "true" diff --git a/hbase/hbase-operator-tools/boil-config.toml b/hbase/hbase-operator-tools/boil-config.toml index 92570b2a5..30c0d5bf3 100644 --- a/hbase/hbase-operator-tools/boil-config.toml +++ b/hbase/hbase-operator-tools/boil-config.toml @@ -1,19 +1,19 @@ -[versions."1.3.0-fd5a5fb-hbase2.6.1".local-images] -"hadoop/hadoop" = "3.3.6" -"hbase/hbase" = "2.6.1" +[versions."1.3.0-fd5a5fb-hbase2.6.2".local-images] +"hadoop/hadoop" = "3.4.1" +"hbase/hbase" = "2.6.2" "java-devel" = "11" -[versions."1.3.0-fd5a5fb-hbase2.6.1".build-arguments] +[versions."1.3.0-fd5a5fb-hbase2.6.2".build-arguments] hbase-operator-tools-version = "1.3.0-fd5a5fb" hbase-thirdparty-version = "4.1.9" delete-caches = "true" -[versions."1.3.0-fd5a5fb-hbase2.6.2".local-images] -"hadoop/hadoop" = "3.4.1" -"hbase/hbase" = "2.6.2" +[versions."1.3.0-fd5a5fb-hbase2.6.3".local-images] +"hadoop/hadoop" = "3.4.2" +"hbase/hbase" = "2.6.3" "java-devel" = "11" -[versions."1.3.0-fd5a5fb-hbase2.6.2".build-arguments] +[versions."1.3.0-fd5a5fb-hbase2.6.3".build-arguments] hbase-operator-tools-version = "1.3.0-fd5a5fb" hbase-thirdparty-version = "4.1.9" delete-caches = "true" diff --git a/hbase/hbase/boil-config.toml b/hbase/hbase/boil-config.toml index 289cdfb13..aaf3adec1 100644 --- a/hbase/hbase/boil-config.toml +++ b/hbase/hbase/boil-config.toml @@ -1,17 +1,17 @@ -[versions."2.6.1".local-images] -"hadoop/hadoop" = "3.3.6" +[versions."2.6.2".local-images] +"hadoop/hadoop" = "3.4.1" java-base = "11" java-devel = "11" -[versions."2.6.1".build-arguments] +[versions."2.6.2".build-arguments] async-profiler-version = "2.9" delete-caches = "true" -[versions."2.6.2".local-images] -"hadoop/hadoop" = "3.4.1" +[versions."2.6.3".local-images] +"hadoop/hadoop" = "3.4.2" java-base = "11" java-devel = "11" -[versions."2.6.2".build-arguments] +[versions."2.6.3".build-arguments] async-profiler-version = "2.9" delete-caches = "true" diff --git a/hbase/hbase/stackable/patches/2.6.3/0001-HBASE-28242-Updates-async-profiler-support.patch b/hbase/hbase/stackable/patches/2.6.3/0001-HBASE-28242-Updates-async-profiler-support.patch new file mode 100644 index 000000000..ec113d0b4 --- /dev/null +++ 
b/hbase/hbase/stackable/patches/2.6.3/0001-HBASE-28242-Updates-async-profiler-support.patch @@ -0,0 +1,385 @@ +From ea347e13277748d6414b867237337baf7d156851 Mon Sep 17 00:00:00 2001 +From: Siegfried Weber +Date: Tue, 6 Feb 2024 16:10:54 +0100 +Subject: HBASE-28242: Updates async-profiler support + +--- + .../hadoop/hbase/http/ProfileServlet.java | 207 +++++++++++------- + 1 file changed, 122 insertions(+), 85 deletions(-) + +diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java +index 122d04ff17..5c2df0076d 100644 +--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java ++++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java +@@ -28,9 +28,11 @@ import java.util.concurrent.TimeUnit; + import java.util.concurrent.atomic.AtomicInteger; + import java.util.concurrent.locks.Lock; + import java.util.concurrent.locks.ReentrantLock; ++ + import javax.servlet.http.HttpServlet; + import javax.servlet.http.HttpServletRequest; + import javax.servlet.http.HttpServletResponse; ++ + import org.apache.hadoop.hbase.util.ProcessUtils; + import org.apache.yetus.audience.InterfaceAudience; + import org.slf4j.Logger; +@@ -40,23 +42,60 @@ import org.apache.hbase.thirdparty.com.google.common.base.Joiner; + + /** + * Servlet that runs async-profiler as web-endpoint. Following options from async-profiler can be +- * specified as query paramater. // -e event profiling event: cpu|alloc|lock|cache-misses etc. // -d +- * duration run profiling for 'duration' seconds (integer) // -i interval sampling interval in +- * nanoseconds (long) // -j jstackdepth maximum Java stack depth (integer) // -b bufsize frame +- * buffer size (long) // -t profile different threads separately // -s simple class names instead of +- * FQN // -o fmt[,fmt...] output format: summary|traces|flat|collapsed|svg|tree|jfr|html // --width +- * px SVG width pixels (integer) // --height px SVG frame height pixels (integer) // --minwidth px +- * skip frames smaller than px (double) // --reverse generate stack-reversed FlameGraph / Call tree +- * Example: - To collect 30 second CPU profile of current process (returns FlameGraph svg) curl +- * "http://localhost:10002/prof" - To collect 1 minute CPU profile of current process and output in +- * tree format (html) curl "http://localhost:10002/prof?output=tree&duration=60" - To collect 30 +- * second heap allocation profile of current process (returns FlameGraph svg) curl +- * "http://localhost:10002/prof?event=alloc" - To collect lock contention profile of current process +- * (returns FlameGraph svg) curl "http://localhost:10002/prof?event=lock" Following event types are +- * supported (default is 'cpu') (NOTE: not all OS'es support all events) // Perf events: // cpu // +- * page-faults // context-switches // cycles // instructions // cache-references // cache-misses // +- * branches // branch-misses // bus-cycles // L1-dcache-load-misses // LLC-load-misses // +- * dTLB-load-misses // mem:breakpoint // trace:tracepoint // Java events: // alloc // lock ++ * specified as query parameter. ++ * ++ * Example: ++ * ++ * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all ++ * events).
++ * Basic events: ++ * ++ * Perf events: ++ * + */ + @InterfaceAudience.Private + public class ProfileServlet extends HttpServlet { +@@ -81,19 +120,20 @@ public class ProfileServlet extends HttpServlet { + WALL("wall"), + ALLOC("alloc"), + LOCK("lock"), +- PAGE_FAULTS("page-faults"), +- CONTEXT_SWITCHES("context-switches"), +- CYCLES("cycles"), +- INSTRUCTIONS("instructions"), +- CACHE_REFERENCES("cache-references"), +- CACHE_MISSES("cache-misses"), +- BRANCHES("branches"), ++ ITIMER("itimer"), ++ BRANCH_INSTRUCTIONS("branch-instructions"), + BRANCH_MISSES("branch-misses"), + BUS_CYCLES("bus-cycles"), ++ CACHE_MISSES("cache-misses"), ++ CACHE_REFERENCES("cache-references"), ++ CONTEXT_SWITCHES("context-switches"), ++ CYCLES("cycles"), ++ DTLB_LOAD_MISSES("dTLB-load-misses"), ++ INSTRUCTIONS("instructions"), + L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"), + LLC_LOAD_MISSES("LLC-load-misses"), +- DTLB_LOAD_MISSES("dTLB-load-misses"), + MEM_BREAKPOINT("mem:breakpoint"), ++ PAGE_FAULTS("page-faults"), + TRACE_TRACEPOINT("trace:tracepoint"),; + + private final String internalName; +@@ -102,11 +142,11 @@ public class ProfileServlet extends HttpServlet { + this.internalName = internalName; + } + +- public String getInternalName() { ++ String getInternalName() { + return internalName; + } + +- public static Event fromInternalName(final String name) { ++ static Event fromInternalName(final String name) { + for (Event event : values()) { + if (event.getInternalName().equalsIgnoreCase(name)) { + return event; +@@ -117,30 +157,26 @@ public class ProfileServlet extends HttpServlet { + } + } + +- enum Output { +- SUMMARY, +- TRACES, +- FLAT, ++ private enum Output { + COLLAPSED, +- // No SVG in 2.x asyncprofiler. +- SVG, +- TREE, ++ FLAMEGRAPH, ++ FLAT, + JFR, +- // In 2.x asyncprofiler, this is how you get flamegraphs. +- HTML ++ TRACES, ++ TREE + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SE_TRANSIENT_FIELD_NOT_RESTORED", + justification = "This class is never serialized nor restored.") +- private transient Lock profilerLock = new ReentrantLock(); ++ private final transient Lock profilerLock = new ReentrantLock(); + private transient volatile Process process; +- private String asyncProfilerHome; ++ private final String asyncProfilerHome; + private Integer pid; + + public ProfileServlet() { + this.asyncProfilerHome = getAsyncProfilerHome(); + this.pid = ProcessUtils.getPid(); +- LOG.info("Servlet process PID: " + pid + " asyncProfilerHome: " + asyncProfilerHome); ++ LOG.info("Servlet process PID: {} asyncProfilerHome: {}", pid, asyncProfilerHome); + } + + @Override +@@ -159,9 +195,9 @@ public class ProfileServlet extends HttpServlet { + setResponseHeader(resp); + resp.getWriter() + .write("ASYNC_PROFILER_HOME env is not set.\n\n" +- + "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" ++ + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. 
For more information please see\n" +- + "http://hbase.apache.org/book.html#profiler\n"); ++ + "https://hbase.apache.org/book.html#profiler\n"); + return; + } + +@@ -177,18 +213,18 @@ public class ProfileServlet extends HttpServlet { + return; + } + +- final int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS); +- final Output output = getOutput(req); +- final Event event = getEvent(req); +- final Long interval = getLong(req, "interval"); +- final Integer jstackDepth = getInteger(req, "jstackdepth", null); +- final Long bufsize = getLong(req, "bufsize"); +- final boolean thread = req.getParameterMap().containsKey("thread"); +- final boolean simple = req.getParameterMap().containsKey("simple"); +- final Integer width = getInteger(req, "width", null); +- final Integer height = getInteger(req, "height", null); +- final Double minwidth = getMinWidth(req); +- final boolean reverse = req.getParameterMap().containsKey("reverse"); ++ Event event = getEvent(req); ++ int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS); ++ Long interval = getLong(req, "interval"); ++ Integer jstackDepth = getInteger(req, "jstackdepth", null); ++ boolean thread = req.getParameterMap().containsKey("thread"); ++ boolean simple = req.getParameterMap().containsKey("simple"); ++ boolean signature = req.getParameterMap().containsKey("signature"); ++ boolean annotate = req.getParameterMap().containsKey("annotate"); ++ boolean prependLib = req.getParameterMap().containsKey("prependlib"); ++ Output output = getOutput(req); ++ Double minwidth = getMinWidth(req); ++ boolean reverse = req.getParameterMap().containsKey("reverse"); + + if (process == null || !process.isAlive()) { + try { +@@ -209,11 +245,7 @@ public class ProfileServlet extends HttpServlet { + cmd.add("-e"); + cmd.add(event.getInternalName()); + cmd.add("-d"); +- cmd.add("" + duration); +- cmd.add("-o"); +- cmd.add(output.name().toLowerCase()); +- cmd.add("-f"); +- cmd.add(outputFile.getAbsolutePath()); ++ cmd.add(String.valueOf(duration)); + if (interval != null) { + cmd.add("-i"); + cmd.add(interval.toString()); +@@ -222,24 +254,25 @@ public class ProfileServlet extends HttpServlet { + cmd.add("-j"); + cmd.add(jstackDepth.toString()); + } +- if (bufsize != null) { +- cmd.add("-b"); +- cmd.add(bufsize.toString()); +- } + if (thread) { + cmd.add("-t"); + } + if (simple) { + cmd.add("-s"); + } +- if (width != null) { +- cmd.add("--width"); +- cmd.add(width.toString()); ++ if (signature) { ++ cmd.add("-g"); + } +- if (height != null) { +- cmd.add("--height"); +- cmd.add(height.toString()); ++ if (annotate) { ++ cmd.add("-a"); + } ++ if (prependLib) { ++ cmd.add("-l"); ++ } ++ cmd.add("-o"); ++ cmd.add(output.name().toLowerCase()); ++ cmd.add("-f"); ++ cmd.add(outputFile.getAbsolutePath()); + if (minwidth != null) { + cmd.add("--minwidth"); + cmd.add(minwidth.toString()); +@@ -247,6 +280,7 @@ public class ProfileServlet extends HttpServlet { + if (reverse) { + cmd.add("--reverse"); + } ++ + cmd.add(pid.toString()); + process = ProcessUtils.runCmdAsync(cmd); + +@@ -257,7 +291,10 @@ public class ProfileServlet extends HttpServlet { + resp.getWriter() + .write("Started [" + event.getInternalName() + + "] profiling. This page will automatically redirect to " + relativeUrl + " after " +- + duration + " seconds.\n\nCommand:\n" + Joiner.on(" ").join(cmd)); ++ + duration + " seconds. " ++ + "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async " ++ + "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler." 
++ + "\n\nCommand:\n" + Joiner.on(" ").join(cmd)); + + // to avoid auto-refresh by ProfileOutputServlet, refreshDelay can be specified + // via url param +@@ -275,8 +312,9 @@ public class ProfileServlet extends HttpServlet { + resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + resp.getWriter() + .write("Unable to acquire lock. Another instance of profiler might be running."); +- LOG.warn("Unable to acquire lock in " + lockTimeoutSecs +- + " seconds. Another instance of profiler might be running."); ++ LOG.warn( ++ "Unable to acquire lock in {} seconds. Another instance of profiler might be running.", ++ lockTimeoutSecs); + } + } catch (InterruptedException e) { + LOG.warn("Interrupted while acquiring profile lock.", e); +@@ -289,9 +327,9 @@ public class ProfileServlet extends HttpServlet { + } + } + +- private Integer getInteger(final HttpServletRequest req, final String param, ++ private static Integer getInteger(final HttpServletRequest req, final String param, + final Integer defaultValue) { +- final String value = req.getParameter(param); ++ String value = req.getParameter(param); + if (value != null) { + try { + return Integer.valueOf(value); +@@ -302,8 +340,8 @@ public class ProfileServlet extends HttpServlet { + return defaultValue; + } + +- private Long getLong(final HttpServletRequest req, final String param) { +- final String value = req.getParameter(param); ++ private static Long getLong(final HttpServletRequest req, final String param) { ++ String value = req.getParameter(param); + if (value != null) { + try { + return Long.valueOf(value); +@@ -314,8 +352,8 @@ public class ProfileServlet extends HttpServlet { + return null; + } + +- private Double getMinWidth(final HttpServletRequest req) { +- final String value = req.getParameter("minwidth"); ++ private static Double getMinWidth(final HttpServletRequest req) { ++ String value = req.getParameter("minwidth"); + if (value != null) { + try { + return Double.valueOf(value); +@@ -326,8 +364,8 @@ public class ProfileServlet extends HttpServlet { + return null; + } + +- private Event getEvent(final HttpServletRequest req) { +- final String eventArg = req.getParameter("event"); ++ private static Event getEvent(final HttpServletRequest req) { ++ String eventArg = req.getParameter("event"); + if (eventArg != null) { + Event event = Event.fromInternalName(eventArg); + return event == null ? Event.CPU : event; +@@ -335,16 +373,16 @@ public class ProfileServlet extends HttpServlet { + return Event.CPU; + } + +- private Output getOutput(final HttpServletRequest req) { +- final String outputArg = req.getParameter("output"); ++ private static Output getOutput(final HttpServletRequest req) { ++ String outputArg = req.getParameter("output"); + if (req.getParameter("output") != null) { + try { + return Output.valueOf(outputArg.trim().toUpperCase()); + } catch (IllegalArgumentException e) { +- return Output.HTML; ++ return Output.FLAMEGRAPH; + } + } +- return Output.HTML; ++ return Output.FLAMEGRAPH; + } + + static void setResponseHeader(final HttpServletResponse response) { +@@ -376,8 +414,7 @@ public class ProfileServlet extends HttpServlet { + .write("The profiler servlet was disabled at startup.\n\n" + + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. 
For more information please see\n" +- + "http://hbase.apache.org/book.html#profiler\n"); +- return; ++ + "https://hbase.apache.org/book.html#profiler\n"); + } + + } diff --git a/hbase/hbase/stackable/patches/2.6.3/0002-Update-all-dependencies-which-have-a-new-patch-updat.patch b/hbase/hbase/stackable/patches/2.6.3/0002-Update-all-dependencies-which-have-a-new-patch-updat.patch new file mode 100644 index 000000000..bc46608d5 --- /dev/null +++ b/hbase/hbase/stackable/patches/2.6.3/0002-Update-all-dependencies-which-have-a-new-patch-updat.patch @@ -0,0 +1,114 @@ +From ba0ec22669815603616d8f232f5f941af45e42c1 Mon Sep 17 00:00:00 2001 +From: Andrew Kenworthy +Date: Tue, 30 Sep 2025 10:26:34 +0200 +Subject: Update all dependencies which have a new patch update + +--- + pom.xml | 38 +++++++++++++++++++------------------- + 1 file changed, 19 insertions(+), 19 deletions(-) + +diff --git a/pom.xml b/pom.xml +index d6ee146971..98ba66f00d 100644 +--- a/pom.xml ++++ b/pom.xml +@@ -566,7 +566,7 @@ + in the dependencyManagement section as it could still lead to different versions of netty + modules and cause trouble if we only rely on transitive dependencies. + --> +- 4.1.121.Final ++ 4.1.122.Final + + 0.13.0 + + 0.13.0 + 1.11.4 +- 2.8.1 ++ 2.8.8 + 1.15 + 2.18.0 + 3.17.0 + 3.6.1 + 1.5.0 + 3.4.4 +- 4.5.13 +- 4.4.13 ++ 4.5.14 ++ 4.4.16 + 3.2.6 + +- 2.2.1 +- 1.0.58 ++ 2.2.6 ++ 1.0.63 + 2.12.3 +- 1.78 +- 1.5.1 ++ 1.78.1 ++ 1.5.3 + 1.0.1 + 1.1.0 + 5.7.1 +@@ -646,29 +646,29 @@ + 1.0.0 + 1.8 + 3.3.0 +- 3.1.0 ++ 3.1.2 + 2.10 + 3.0.1 +- 3.4.0 ++ 3.4.1 + 3.8.1 +- 1.1.0 ++ 1.1.2 + 3.1.2 + 12.1.0 + 1.5.0.Final + 1.3.9-1 + 4.7.3 +- 4.7.2.1 +- 3.1.0 ++ 4.7.3.6 ++ 3.1.2 + 2.12 + 1.0.1 + 2.44.4 +- 3.12.0 ++ 3.12.1 + + 0.27 + 1.11.0 + 1.8.0 +- 1.1.10.4 +- 1.5.7-2 ++ 1.1.10.7 ++ 1.5.7-3 + ++ com.fasterxml.jackson.dataformat ++ jackson-dataformat-xml ++ + + org.apache.logging.log4j + log4j-slf4j-impl +diff --git a/pom.xml b/pom.xml +index 98ba66f00d..f41e50c3bb 100644 +--- a/pom.xml ++++ b/pom.xml +@@ -1173,6 +1173,11 @@ + log4j-core + ${log4j2.version} + ++ ++ org.apache.logging.log4j ++ log4j-core ++ ${log4j2.version} ++ + + org.apache.logging.log4j + log4j-slf4j-impl +@@ -1183,6 +1188,13 @@ + log4j-1.2-api + ${log4j2.version} + ++ ++ ++ com.fasterxml.jackson.dataformat ++ jackson-dataformat-xml ++ ${jackson.databind.version} ++ ++ + + + org.apache.avro diff --git a/hbase/hbase/stackable/patches/2.6.3/0004-Bump-cyclonedx-maven-plugin-to-version-2.9.1-and-twe.patch b/hbase/hbase/stackable/patches/2.6.3/0004-Bump-cyclonedx-maven-plugin-to-version-2.9.1-and-twe.patch new file mode 100644 index 000000000..0823bfbf6 --- /dev/null +++ b/hbase/hbase/stackable/patches/2.6.3/0004-Bump-cyclonedx-maven-plugin-to-version-2.9.1-and-twe.patch @@ -0,0 +1,27 @@ +From 646a90225985605e3dab3bbca3b04182a004bb2b Mon Sep 17 00:00:00 2001 +From: Lukas Voetmand +Date: Fri, 6 Sep 2024 17:53:52 +0200 +Subject: Bump cyclonedx-maven-plugin to version 2.9.1 and tweak its + configuration + +--- + pom.xml | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/pom.xml b/pom.xml +index f41e50c3bb..f6a47dd880 100644 +--- a/pom.xml ++++ b/pom.xml +@@ -3315,7 +3315,11 @@ + + org.cyclonedx + cyclonedx-maven-plugin +- 2.7.10 ++ 2.9.1 ++ ++ application ++ 1.5 ++ + + + diff --git a/hbase/hbase/stackable/patches/2.6.3/0005-Allow-overriding-ipc-bind-port-and-use-alternative-p.patch b/hbase/hbase/stackable/patches/2.6.3/0005-Allow-overriding-ipc-bind-port-and-use-alternative-p.patch new file mode 100644 index 000000000..b67fd3558 --- 
/dev/null +++ b/hbase/hbase/stackable/patches/2.6.3/0005-Allow-overriding-ipc-bind-port-and-use-alternative-p.patch @@ -0,0 +1,314 @@ +From 8deb539a05bdf88189b6b269ec961ff6abe6a174 Mon Sep 17 00:00:00 2001 +From: Andrew Kenworthy +Date: Mon, 16 Jun 2025 14:44:32 +0200 +Subject: Allow overriding ipc bind port and use alternative port from listener + +--- + .../org/apache/hadoop/hbase/HConstants.java | 29 ++++++++++-- + .../apache/hadoop/hbase/master/HMaster.java | 20 +++++++-- + .../hbase/regionserver/HRegionServer.java | 45 +++++++++++++------ + .../hbase/regionserver/RSRpcServices.java | 8 +++- + 4 files changed, 80 insertions(+), 22 deletions(-) + +diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +index 14d7073d5d..f0e286d666 100644 +--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java ++++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +@@ -195,18 +195,27 @@ public final class HConstants { + /** default host address */ + public static final String DEFAULT_HOST = "0.0.0.0"; + +- /** Parameter name for port master listens on. */ ++ /** Parameter name for port master advertises as listening on. */ + public static final String MASTER_PORT = "hbase.master.port"; + ++ /** Parameter name for IPC address that master listens on. (Defaults to hostname.) */ ++ public static final String MASTER_IPC_ADDRESS = "hbase.master.ipc.address"; ++ ++ /** Parameter name for IPC port that master listens on. (Defaults to MASTER_PORT.) */ ++ public static final String MASTER_IPC_PORT = "hbase.master.ipc.port"; ++ + /** default port that the master listens on */ + public static final int DEFAULT_MASTER_PORT = 16000; + + /** default port for master web api */ + public static final int DEFAULT_MASTER_INFOPORT = 16010; + +- /** Configuration key for master web API port */ ++ /** Configuration key for advertised master web API port */ + public static final String MASTER_INFO_PORT = "hbase.master.info.port"; + ++ /** Configuration key for bound master web API port. (Defaults to MASTER_INFO_PORT.) */ ++ public static final String MASTER_BOUND_INFO_PORT = "hbase.master.bound.info.port"; ++ + /** Configuration key for the list of master host:ports **/ + public static final String MASTER_ADDRS_KEY = "hbase.masters"; + +@@ -314,18 +323,27 @@ public final class HConstants { + /** Default value for ZooKeeper session timeout */ + public static final int DEFAULT_ZK_SESSION_TIMEOUT = 90 * 1000; + +- /** Parameter name for port region server listens on. */ ++ /** Parameter name for port region server advertises as listening on. */ + public static final String REGIONSERVER_PORT = "hbase.regionserver.port"; + ++ /** Parameter name for IPC address that region server listens on. (Defaults to hostname.) */ ++ public static final String REGIONSERVER_IPC_ADDRESS = "hbase.regionserver.ipc.address"; ++ ++ /** Parameter name for IPC port that region server listens on. (Defaults to REGIONSERVER_PORT.) */ ++ public static final String REGIONSERVER_IPC_PORT = "hbase.regionserver.ipc.port"; ++ + /** Default port region server listens on. 
*/ + public static final int DEFAULT_REGIONSERVER_PORT = 16020; + + /** default port for region server web api */ + public static final int DEFAULT_REGIONSERVER_INFOPORT = 16030; + +- /** A configuration key for regionserver info port */ ++ /** Configuration key for advertised region server web API port */ + public static final String REGIONSERVER_INFO_PORT = "hbase.regionserver.info.port"; + ++ /** Configuration key for bound region server web API port. (Defaults to REGIONSERVER_INFO_PORT.) */ ++ public static final String REGIONSERVER_BOUND_INFO_PORT = "hbase.regionserver.bound.info.port"; ++ + /** A flag that enables automatic selection of regionserver info port */ + public static final String REGIONSERVER_INFO_PORT_AUTO = REGIONSERVER_INFO_PORT + ".auto"; + +@@ -1402,6 +1420,9 @@ public final class HConstants { + /** Configuration key for setting RPC codec class name */ + public static final String RPC_CODEC_CONF_KEY = "hbase.client.rpc.codec"; + ++ /** Configuration key for setting that the RPC client should bind the client address. This forces outgoing RPC traffic to happen from the same network interface that the RPC server is bound on. */ ++ public static final String RPC_CLIENT_BIND_ADDRESS = "hbase.client.rpc.bind.address"; ++ + /** Configuration key for setting replication codec class name */ + public static final String REPLICATION_CODEC_CONF_KEY = "hbase.replication.rpc.codec"; + +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +index e099760a7a..7eab15970c 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +@@ -17,6 +17,8 @@ + */ + package org.apache.hadoop.hbase.master; + ++import static org.apache.hadoop.hbase.HConstants.MASTER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.MASTER_PORT; + import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; +@@ -571,6 +573,18 @@ public class HMaster extends HRegionServer implements MasterServices { + return conf.get(MASTER_HOSTNAME_KEY); + } + ++ @Override ++ protected int getUseThisPortInstead(Configuration conf) { ++ int port = conf.getInt(MASTER_PORT, 0); ++ return port != 0 ? port : this.rpcServices.getSocketAddress().getPort(); ++ } ++ ++ @Override ++ protected int getUseThisInfoPortInstead(Configuration conf) { ++ int port = conf.getInt(MASTER_BOUND_INFO_PORT, 0); ++ return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; ++ } ++ + private void registerConfigurationObservers() { + configurationManager.registerObserver(this.rpcServices); + configurationManager.registerObserver(this); +@@ -598,8 +612,8 @@ public class HMaster extends HRegionServer implements MasterServices { + registerConfigurationObservers(); + Threads.setDaemonThreadRunning(new Thread(() -> TraceUtil.trace(() -> { + try { +- int infoPort = putUpJettyServer(); +- startActiveMasterManager(infoPort); ++ putUpJettyServer(); ++ startActiveMasterManager(useThisInfoPortInstead); + } catch (Throwable t) { + // Make sure we log the exception. 
+ String error = "Failed to become Active Master"; +@@ -3017,7 +3031,7 @@ public class HMaster extends HRegionServer implements MasterServices { + } + case MASTER_INFO_PORT: { + if (infoServer != null) { +- builder.setMasterInfoPort(infoServer.getPort()); ++ builder.setMasterInfoPort(useThisInfoPortInstead); + } + break; + } +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +index 810e10f1c5..454c5580c4 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +@@ -24,6 +24,9 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_WAL_MAX_SPL + import static org.apache.hadoop.hbase.HConstants.DEFAULT_SLOW_LOG_SYS_TABLE_CHORE_DURATION; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_PORT; ++import static org.apache.hadoop.hbase.HConstants.RPC_CLIENT_BIND_ADDRESS; + import static org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator.WAL_EVENT_TRACKER_ENABLED_DEFAULT; + import static org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator.WAL_EVENT_TRACKER_ENABLED_KEY; + import static org.apache.hadoop.hbase.namequeues.NamedQueueServiceChore.NAMED_QUEUE_CHORE_DURATION_DEFAULT; +@@ -505,6 +508,10 @@ public class HRegionServer extends Thread + */ + protected String useThisHostnameInstead; + ++ protected int useThisPortInstead; ++ ++ protected int useThisInfoPortInstead; ++ + /** + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link HRegionServer#UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY} instead. +@@ -669,6 +676,8 @@ public class HRegionServer extends Thread + this.namedQueueRecorder = NamedQueueRecorder.getInstance(this.conf); + rpcServices = createRpcServices(); + useThisHostnameInstead = getUseThisHostnameInstead(conf); ++ useThisPortInstead = getUseThisPortInstead(conf); ++ useThisInfoPortInstead = getUseThisInfoPortInstead(conf); + + // if use-ip is enabled, we will use ip to expose Master/RS service for client, + // see HBASE-27304 for details. +@@ -678,7 +687,7 @@ public class HRegionServer extends Thread + useIp ? rpcServices.isa.getAddress().getHostAddress() : rpcServices.isa.getHostName(); + String hostName = + StringUtils.isBlank(useThisHostnameInstead) ? isaHostName : useThisHostnameInstead; +- serverName = ServerName.valueOf(hostName, this.rpcServices.isa.getPort(), this.startcode); ++ serverName = ServerName.valueOf(hostName, useThisPortInstead, this.startcode); + + rpcControllerFactory = RpcControllerFactory.instantiate(this.conf); + rpcRetryingCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf, +@@ -715,7 +724,7 @@ public class HRegionServer extends Thread + + // Some unit tests don't need a cluster, so no zookeeper at all + // Open connection to zookeeper and set primary watcher +- zooKeeper = new ZKWatcher(conf, getProcessName() + ":" + rpcServices.isa.getPort(), this, ++ zooKeeper = new ZKWatcher(conf, getProcessName() + ":" + useThisPortInstead, this, + canCreateBaseZNode()); + // If no master in cluster, skip trying to track one or look for a cluster status. 
+ if (!this.masterless) { +@@ -776,6 +785,16 @@ public class HRegionServer extends Thread + } + } + ++ protected int getUseThisPortInstead(Configuration conf) { ++ int port = conf.getInt(REGIONSERVER_PORT, 0); ++ return port != 0 ? port : this.rpcServices.isa.getPort(); ++ } ++ ++ protected int getUseThisInfoPortInstead(Configuration conf) { ++ int port = conf.getInt(REGIONSERVER_BOUND_INFO_PORT, 0); ++ return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; ++ } ++ + private void setupSignalHandlers() { + if (!SystemUtils.IS_OS_WINDOWS) { + HBasePlatformDependent.handle("HUP", (number, name) -> { +@@ -958,7 +977,7 @@ public class HRegionServer extends Thread + } + // Setup RPC client for master communication + this.rpcClient = RpcClientFactory.createClient(conf, clusterId, +- new InetSocketAddress(this.rpcServices.isa.getAddress(), 0), ++ getInetSocketAddress(this.conf), + clusterConnection.getConnectionMetrics(), Collections.emptyMap()); + span.setStatus(StatusCode.OK); + } catch (Throwable t) { +@@ -972,6 +991,11 @@ public class HRegionServer extends Thread + } + } + ++ private InetSocketAddress getInetSocketAddress(Configuration conf) { ++ return conf.getBoolean(RPC_CLIENT_BIND_ADDRESS, true) ? ++ new InetSocketAddress(this.rpcServices.isa.getAddress(), 0) : new InetSocketAddress(0); ++ } ++ + /** + * Bring up connection to zk ensemble and then wait until a master for this cluster and then after + * that, wait until cluster 'up' flag has been set. This is the order in which master does things. +@@ -1528,11 +1552,7 @@ public class HRegionServer extends Thread + + serverLoad.setReportStartTime(reportStartTime); + serverLoad.setReportEndTime(reportEndTime); +- if (this.infoServer != null) { +- serverLoad.setInfoServerPort(this.infoServer.getPort()); +- } else { +- serverLoad.setInfoServerPort(-1); +- } ++ serverLoad.setInfoServerPort(useThisInfoPortInstead); + MetricsUserAggregateSource userSource = + metricsRegionServer.getMetricsUserAggregate().getSource(); + if (userSource != null) { +@@ -1688,7 +1708,7 @@ public class HRegionServer extends Thread + if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) { + String hostnameFromMasterPOV = e.getValue(); + this.serverName = ServerName.valueOf(hostnameFromMasterPOV, +- rpcServices.getSocketAddress().getPort(), this.startcode); ++ useThisPortInstead, this.startcode); + String expectedHostName = rpcServices.getSocketAddress().getHostName(); + // if Master use-ip is enabled, RegionServer use-ip will be enabled by default even if it + // is set to disable. so we will use the ip of the RegionServer to compare with the +@@ -1814,7 +1834,7 @@ public class HRegionServer extends Thread + + private void createMyEphemeralNode() throws KeeperException { + RegionServerInfo.Builder rsInfo = RegionServerInfo.newBuilder(); +- rsInfo.setInfoPort(infoServer != null ? infoServer.getPort() : -1); ++ rsInfo.setInfoPort(infoServer != null ? 
useThisInfoPortInstead : -1); + rsInfo.setVersionInfo(ProtobufUtil.getVersionInfo()); + byte[] data = ProtobufUtil.prependPBMagic(rsInfo.build().toByteArray()); + ZKUtil.createEphemeralNodeAndWatch(this.zooKeeper, getMyEphemeralNodePath(), data); +@@ -2487,7 +2507,7 @@ public class HRegionServer extends Thread + LOG.info("Retry starting http info server with port: " + port); + } + } +- port = this.infoServer.getPort(); ++ port = useThisInfoPortInstead; + conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port); + int masterInfoPort = + conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); +@@ -3081,12 +3101,11 @@ public class HRegionServer extends Thread + LOG.info("reportForDuty to master=" + masterServerName + " with isa=" + rpcServices.isa + + ", startcode=" + this.startcode); + long now = EnvironmentEdgeManager.currentTime(); +- int port = rpcServices.isa.getPort(); + RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder(); + if (!StringUtils.isBlank(useThisHostnameInstead)) { + request.setUseThisHostnameInstead(useThisHostnameInstead); + } +- request.setPort(port); ++ request.setPort(useThisPortInstead); + request.setServerStartCode(this.startcode); + request.setServerCurrentTime(now); + result = rss.regionServerStartup(null, request.build()); +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +index 00ba123342..bcb68822cc 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +@@ -282,6 +282,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescr + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor; + import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor; ++import static org.apache.hadoop.hbase.HConstants.MASTER_IPC_ADDRESS; ++import static org.apache.hadoop.hbase.HConstants.MASTER_IPC_PORT; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_IPC_ADDRESS; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_IPC_PORT; + + /** + * Implements the regionserver RPC services. +@@ -1272,14 +1276,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler, AdminService.Blockin + int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT); + // Creation of a HSA will force a resolve. + initialIsa = new InetSocketAddress(hostname, port); +- bindAddress = new InetSocketAddress(conf.get("hbase.master.ipc.address", hostname), port); ++ bindAddress = new InetSocketAddress(conf.get(MASTER_IPC_ADDRESS, hostname), conf.getInt(MASTER_IPC_PORT, port)); + } else { + String hostname = DNS.getHostname(conf, DNS.ServerType.REGIONSERVER); + int port = conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT); + // Creation of a HSA will force a resolve. 
+ initialIsa = new InetSocketAddress(hostname, port); + bindAddress = +- new InetSocketAddress(conf.get("hbase.regionserver.ipc.address", hostname), port); ++ new InetSocketAddress(conf.get(REGIONSERVER_IPC_ADDRESS, hostname), conf.getInt(REGIONSERVER_IPC_PORT, port)); + } + if (initialIsa.getAddress() == null) { + throw new IllegalArgumentException("Failed resolve of " + initialIsa); diff --git a/hbase/hbase/stackable/patches/2.6.3/0006-Update-property-usage-for-bound-ports.patch b/hbase/hbase/stackable/patches/2.6.3/0006-Update-property-usage-for-bound-ports.patch new file mode 100644 index 000000000..4947d5e72 --- /dev/null +++ b/hbase/hbase/stackable/patches/2.6.3/0006-Update-property-usage-for-bound-ports.patch @@ -0,0 +1,156 @@ +From 5c2d5d0cf2fceec60fff75736776f9e0ae0ff178 Mon Sep 17 00:00:00 2001 +From: Andrew Kenworthy +Date: Thu, 26 Jun 2025 16:58:47 +0200 +Subject: Update property usage for bound ports + +--- + .../org/apache/hadoop/hbase/HConstants.java | 4 ++-- + .../hadoop/hbase/LocalHBaseCluster.java | 12 +++++------ + .../apache/hadoop/hbase/master/HMaster.java | 6 +++--- + .../hbase/regionserver/HRegionServer.java | 21 +++++++++++++------ + 4 files changed, 26 insertions(+), 17 deletions(-) + +diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +index f0e286d666..cd329b08e6 100644 +--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java ++++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +@@ -213,7 +213,7 @@ public final class HConstants { + /** Configuration key for advertised master web API port */ + public static final String MASTER_INFO_PORT = "hbase.master.info.port"; + +- /** Configuration key for bound master web API port. (Defaults to MASTER_INFO_PORT.) */ ++ /** Configuration key for bound master web API port */ + public static final String MASTER_BOUND_INFO_PORT = "hbase.master.bound.info.port"; + + /** Configuration key for the list of master host:ports **/ +@@ -341,7 +341,7 @@ public final class HConstants { + /** Configuration key for advertised region server web API port */ + public static final String REGIONSERVER_INFO_PORT = "hbase.regionserver.info.port"; + +- /** Configuration key for bound region server web API port. (Defaults to REGIONSERVER_INFO_PORT.) */ ++ /** Configuration key for bound region server web API port */ + public static final String REGIONSERVER_BOUND_INFO_PORT = "hbase.regionserver.bound.info.port"; + + /** A flag that enables automatic selection of regionserver info port */ +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +index 816ef997cb..2114725986 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +@@ -144,20 +144,20 @@ public class LocalHBaseCluster { + // treat info ports special; expressly don't change '-1' (keep off) + // in case we make that the default behavior. 
+ if ( +- conf.getInt(HConstants.REGIONSERVER_INFO_PORT, 0) != -1 +- && conf.getInt(HConstants.REGIONSERVER_INFO_PORT, ++ conf.getInt(HConstants.REGIONSERVER_BOUND_INFO_PORT, 0) != -1 ++ && conf.getInt(HConstants.REGIONSERVER_BOUND_INFO_PORT, + HConstants.DEFAULT_REGIONSERVER_INFOPORT) == HConstants.DEFAULT_REGIONSERVER_INFOPORT + ) { + LOG.debug("Setting RS InfoServer Port to random."); +- conf.set(HConstants.REGIONSERVER_INFO_PORT, "0"); ++ conf.set(HConstants.REGIONSERVER_BOUND_INFO_PORT, "0"); + } + if ( +- conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1 +- && conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) ++ conf.getInt(HConstants.MASTER_BOUND_INFO_PORT, 0) != -1 ++ && conf.getInt(HConstants.MASTER_BOUND_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) + == HConstants.DEFAULT_MASTER_INFOPORT + ) { + LOG.debug("Setting Master InfoServer Port to random."); +- conf.set(HConstants.MASTER_INFO_PORT, "0"); ++ conf.set(HConstants.MASTER_BOUND_INFO_PORT, "0"); + } + } + +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +index 7eab15970c..8f9a95c506 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +@@ -17,7 +17,7 @@ + */ + package org.apache.hadoop.hbase.master; + +-import static org.apache.hadoop.hbase.HConstants.MASTER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.MASTER_INFO_PORT; + import static org.apache.hadoop.hbase.HConstants.MASTER_PORT; + import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS; +@@ -581,7 +581,7 @@ public class HMaster extends HRegionServer implements MasterServices { + + @Override + protected int getUseThisInfoPortInstead(Configuration conf) { +- int port = conf.getInt(MASTER_BOUND_INFO_PORT, 0); ++ int port = conf.getInt(MASTER_INFO_PORT, 0); + return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; + } + +@@ -3169,7 +3169,7 @@ public class HMaster extends HRegionServer implements MasterServices { + public int getRegionServerInfoPort(final ServerName sn) { + int port = this.serverManager.getInfoPort(sn); + return port == 0 +- ? conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT) ++ ? 
conf.getInt(HConstants.REGIONSERVER_BOUND_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT) + : port; + } + +diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +index 454c5580c4..bf35b0a38a 100644 +--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java ++++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +@@ -25,6 +25,7 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_SLOW_LOG_SYS_TABLE_CHOR + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; + import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; + import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_BOUND_INFO_PORT; ++import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_INFO_PORT; + import static org.apache.hadoop.hbase.HConstants.REGIONSERVER_PORT; + import static org.apache.hadoop.hbase.HConstants.RPC_CLIENT_BIND_ADDRESS; + import static org.apache.hadoop.hbase.master.waleventtracker.WALEventTrackerTableCreator.WAL_EVENT_TRACKER_ENABLED_DEFAULT; +@@ -791,7 +792,7 @@ public class HRegionServer extends Thread + } + + protected int getUseThisInfoPortInstead(Configuration conf) { +- int port = conf.getInt(REGIONSERVER_BOUND_INFO_PORT, 0); ++ int port = conf.getInt(REGIONSERVER_INFO_PORT, 0); + return port != 0 ? port : this.infoServer != null ? this.infoServer.getPort() : -1; + } + +@@ -2465,12 +2466,14 @@ public class HRegionServer extends Thread + */ + private void putUpWebUI() throws IOException { + int port = +- this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT); ++ this.conf.getInt(REGIONSERVER_BOUND_INFO_PORT, ++ this.conf.getInt(REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT)); + String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0"); + + boolean isMaster = false; + if (this instanceof HMaster) { +- port = conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); ++ port = conf.getInt(HConstants.MASTER_BOUND_INFO_PORT, ++ this.conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT)); + addr = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0"); + isMaster = true; + } +@@ -2507,12 +2510,18 @@ public class HRegionServer extends Thread + LOG.info("Retry starting http info server with port: " + port); + } + } +- port = useThisInfoPortInstead; +- conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port); ++ ++ // update bound ports ++ port = this.infoServer.getPort(); ++ conf.setInt(REGIONSERVER_BOUND_INFO_PORT, port); ++ conf.setInt(HConstants.MASTER_BOUND_INFO_PORT, port); ++ ++ // set advertised ports ++ conf.setInt(REGIONSERVER_INFO_PORT, useThisInfoPortInstead); + int masterInfoPort = + conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); + conf.setInt("hbase.master.info.port.orig", masterInfoPort); +- conf.setInt(HConstants.MASTER_INFO_PORT, port); ++ conf.setInt(HConstants.MASTER_INFO_PORT, useThisInfoPortInstead); + } + + /* diff --git a/hbase/hbase/stackable/patches/2.6.3/patchable.toml b/hbase/hbase/stackable/patches/2.6.3/patchable.toml new file mode 100644 index 000000000..a3f9f961c --- /dev/null +++ b/hbase/hbase/stackable/patches/2.6.3/patchable.toml @@ -0,0 +1,2 @@ +mirror = "https://github.com/stackabletech/hbase.git" +base = "01ac2f51f3bb4cee623ea7dced75bfccbb6b562e" diff --git 
a/hbase/phoenix/boil-config.toml b/hbase/phoenix/boil-config.toml index 8425f970a..faa7f48fe 100644 --- a/hbase/phoenix/boil-config.toml +++ b/hbase/phoenix/boil-config.toml @@ -1,19 +1,19 @@ -[versions."5.2.1-hbase2.6.1".local-images] -"hbase/hbase" = "2.6.1" -"hadoop/hadoop" = "3.3.6" +[versions."5.2.1-hbase2.6.2".local-images] +"hbase/hbase" = "2.6.2" +"hadoop/hadoop" = "3.4.1" java-devel = "11" -[versions."5.2.1-hbase2.6.1".build-arguments] +[versions."5.2.1-hbase2.6.2".build-arguments] phoenix-version = "5.2.1" hbase-profile = "2.6" delete-caches = "true" -[versions."5.2.1-hbase2.6.2".local-images] -"hbase/hbase" = "2.6.2" -"hadoop/hadoop" = "3.4.1" +[versions."5.2.1-hbase2.6.3".local-images] +"hbase/hbase" = "2.6.3" +"hadoop/hadoop" = "3.4.2" java-devel = "11" -[versions."5.2.1-hbase2.6.2".build-arguments] +[versions."5.2.1-hbase2.6.3".build-arguments] phoenix-version = "5.2.1" hbase-profile = "2.6" delete-caches = "true"