diff --git a/CHANGELOG.md b/CHANGELOG.md
index ad5019efc..d64987b58 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,15 @@ All notable changes to this project will be documented in this file.
- omid: init at 1.1.0 ([#493]).
- hadoop: Allow datanodes to override their registration addresses ([#506], [#544]).
+- hadoop: Add async-profiler and backport HADOOP-18055 and HADOOP-18077
+ to support it ([#540]).
+- hadoop: Add `tar` package, so that `kubectl cp` can be used to copy
+ log files and profiler flamegraphs ([#540]).
+- hbase: Add async-profiler and backport HBASE-28242 to support it
+ ([#540]).
+- hbase: Allow multiple certificates in the KeyStores which is required for
+ rotating CA certificates. Because of this, HBASE-27027 was backported to
+ HBase version 2.4.12 ([#540]).
- nifi: Add Apache Iceberg extensions ([#529]).
- testing-tools: Add krb5-user library for Kerberos tests ([#531]).
- testing-tools: Add the Python library Beautiful Soup 4 ([#536]).
@@ -23,6 +32,10 @@ All notable changes to this project will be documented in this file.
- hadoop: Build from source ([#526]).
- superset: Add patch that fixes saved queries export ([#539]).
+### Removed
+
+- hadoop: Remove support for version 3.2.2 ([#540]).
+
[#493]: https://github.com/stackabletech/docker-images/pull/493
[#506]: https://github.com/stackabletech/docker-images/pull/506
[#514]: https://github.com/stackabletech/docker-images/pull/514
@@ -35,6 +48,7 @@ All notable changes to this project will be documented in this file.
[#537]: https://github.com/stackabletech/docker-images/pull/537
[#538]: https://github.com/stackabletech/docker-images/pull/538
[#539]: https://github.com/stackabletech/docker-images/pull/539
+[#540]: https://github.com/stackabletech/docker-images/pull/540
[#542]: https://github.com/stackabletech/docker-images/pull/542
[#544]: https://github.com/stackabletech/docker-images/pull/544
diff --git a/conf.py b/conf.py
index 8d3b161d7..dbfc72d25 100644
--- a/conf.py
+++ b/conf.py
@@ -58,16 +58,10 @@
{
"name": "hadoop",
"versions": [
- {
- "product": "3.2.2",
- "java-base": "11",
- "jmx_exporter": "0.20.0",
- "protobuf": "2.5.0",
- "topology_provider": "0.1.0"
- },
{
"product": "3.2.4",
"java-base": "11",
+ "async_profiler": "2.9",
"jmx_exporter": "0.20.0",
"protobuf": "2.5.0",
"topology_provider": "0.1.0"
@@ -75,6 +69,7 @@
{
"product": "3.3.4",
"java-base": "11",
+ "async_profiler": "2.9",
"jmx_exporter": "0.20.0",
"protobuf": "3.7.1",
"topology_provider": "0.1.0"
@@ -82,6 +77,7 @@
{
"product": "3.3.6",
"java-base": "11",
+ "async_profiler": "2.9",
"jmx_exporter": "0.20.0",
"protobuf": "3.7.1",
"topology_provider": "0.1.0"
@@ -100,6 +96,7 @@
"hbase_thirdparty": "3.5.1",
"hbase_operator_tools": "1.2.0",
"java-base": "11",
+ "async_profiler": "2.9",
"phoenix": "2.4-5.1.2",
"hadoop_m2": "3.3.6",
"jmx_exporter": "0.20.0",
@@ -109,6 +106,7 @@
"hbase_thirdparty": "4.1.4",
"hbase_operator_tools": "1.2.0",
"java-base": "11",
+ "async_profiler": "2.9",
"phoenix": "2.4-5.1.3",
"hadoop_m2": "3.3.6",
"jmx_exporter": "0.20.0",
diff --git a/hadoop/Dockerfile b/hadoop/Dockerfile
index e9bb3fab7..8a09edf83 100644
--- a/hadoop/Dockerfile
+++ b/hadoop/Dockerfile
@@ -2,9 +2,12 @@
FROM stackable/image/java-base AS builder
ARG PRODUCT
+ARG ASYNC_PROFILER
ARG JMX_EXPORTER
ARG PROTOBUF
ARG TOPOLOGY_PROVIDER
+ARG TARGETARCH
+ARG TARGETOS
# https://github.com/hadolint/hadolint/wiki/DL4006
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
@@ -36,6 +39,10 @@ RUN curl --fail "https://repo.stackable.tech/repository/packages/jmx-exporter/jm
ln -s "/stackable/jmx/jmx_prometheus_javaagent-${JMX_EXPORTER}.jar" /stackable/jmx/jmx_prometheus_javaagent.jar && \
ln -s /stackable/jmx/jmx_prometheus_javaagent.jar /stackable/jmx/jmx_prometheus_javaagent-0.16.1.jar
+RUN ARCH="${TARGETARCH/amd64/x64}" && \
+ curl --fail -L "https://repo.stackable.tech/repository/packages/async-profiler/async-profiler-${ASYNC_PROFILER}-${TARGETOS}-${ARCH}.tar.gz" | tar -xzC . && \
+ ln -s "/stackable/async-profiler-${ASYNC_PROFILER}-${TARGETOS}-${ARCH}" /stackable/async-profiler
+
# This Protobuf version is the exact version as used in the Hadoop Dockerfile
# See https://github.com/apache/hadoop/blob/trunk/dev-support/docker/pkg-resolver/install-protobuf.sh
# (this was hardcoded in the Dockerfile in earlier versions of Hadoop, make sure to look at the exact version in Github)
@@ -107,7 +114,10 @@ RUN microdnf update && \
microdnf install \
fuse \
fuse-libs \
- krb5-workstation && \
+ krb5-workstation \
+ # tar is required for `kubectl cp` which can be used to copy the log files
+ # or profiler flamegraph from the Pod
+ tar && \
microdnf clean all && \
rm -rf /var/cache/yum
@@ -122,6 +132,7 @@ WORKDIR /stackable
COPY --chown=stackable:stackable --from=builder /stackable/hadoop-${PRODUCT} /stackable/hadoop-${PRODUCT}/
COPY --chown=stackable:stackable --from=builder /stackable/jmx /stackable/jmx/
+COPY --chown=stackable:stackable --from=builder /stackable/async-profiler /stackable/async-profiler/
# The topology provider provides rack awareness functionality for HDFS by allowing users to specify Kubernetes
# labels to build a rackID from
@@ -136,6 +147,7 @@ ENV LD_LIBRARY_PATH=/stackable/hadoop/lib/native:/usr/lib/jvm/jre/lib/server
ENV PATH="${PATH}":/stackable/hadoop/bin
ENV HADOOP_HOME=/stackable/hadoop
ENV HADOOP_CONF_DIR=/stackable/config
+ENV ASYNC_PROFILER_HOME=/stackable/async-profiler
WORKDIR /stackable/hadoop
CMD ["echo", "This image is not meant to be 'run' directly."]
diff --git a/hadoop/stackable/patches/3.2.4/002-HADOOP-18055-3.2.4.patch b/hadoop/stackable/patches/3.2.4/002-HADOOP-18055-3.2.4.patch
new file mode 100644
index 000000000..1d618ddcc
--- /dev/null
+++ b/hadoop/stackable/patches/3.2.4/002-HADOOP-18055-3.2.4.patch
@@ -0,0 +1,999 @@
+diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+index 705f9980ffbb..39ca69c85f4f 100644
+--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
++++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+@@ -27,6 +27,9 @@
+ import java.net.MalformedURLException;
+ import java.net.URI;
+ import java.net.URL;
++import java.nio.file.Files;
++import java.nio.file.Path;
++import java.nio.file.Paths;
+ import java.util.ArrayList;
+ import java.util.Collections;
+ import java.util.Enumeration;
+@@ -635,6 +638,8 @@ private void initializeWebServer(String name, String hostName,
+ addFilterPathMapping(path, webAppContext);
+ }
+ }
++
++ addAsyncProfilerServlet(contexts);
+ }
+
+ private void addListener(ServerConnector connector) {
+@@ -781,6 +786,25 @@ protected void addDefaultServlets() {
+ addServlet("conf", "/conf", ConfServlet.class);
+ }
+
++ private void addAsyncProfilerServlet(ContextHandlerCollection contexts) throws IOException {
++ final String asyncProfilerHome = ProfileServlet.getAsyncProfilerHome();
++ if (asyncProfilerHome != null && !asyncProfilerHome.trim().isEmpty()) {
++ addServlet("prof", "/prof", ProfileServlet.class);
++ Path tmpDir = Paths.get(ProfileServlet.OUTPUT_DIR);
++ if (Files.notExists(tmpDir)) {
++ Files.createDirectories(tmpDir);
++ }
++ ServletContextHandler genCtx = new ServletContextHandler(contexts, "/prof-output-hadoop");
++ genCtx.addServlet(ProfileOutputServlet.class, "/*");
++ genCtx.setResourceBase(tmpDir.toAbsolutePath().toString());
++ genCtx.setDisplayName("prof-output-hadoop");
++ } else {
++ addServlet("prof", "/prof", ProfilerDisabledServlet.class);
++ LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property "
++ + "not specified. Disabling /prof endpoint.");
++ }
++ }
++
+ public void addContext(ServletContextHandler ctxt, boolean isFiltered) {
+ handlers.addHandler(ctxt);
+ addNoCacheFilter(ctxt);
+diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileOutputServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileOutputServlet.java
+new file mode 100644
+index 000000000000..1ecc21f3753c
+--- /dev/null
++++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileOutputServlet.java
+@@ -0,0 +1,87 @@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.http;
++
++import java.io.File;
++import java.io.IOException;
++import java.util.regex.Pattern;
++import javax.servlet.ServletException;
++import javax.servlet.http.HttpServletRequest;
++import javax.servlet.http.HttpServletResponse;
++
++import org.eclipse.jetty.servlet.DefaultServlet;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++import org.apache.hadoop.classification.InterfaceAudience;
++
++/**
++ * Servlet to serve files generated by {@link ProfileServlet}.
++ */
++@InterfaceAudience.Private
++public class ProfileOutputServlet extends DefaultServlet {
++
++ private static final long serialVersionUID = 1L;
++
++ private static final Logger LOG = LoggerFactory.getLogger(ProfileOutputServlet.class);
++ // default refresh period 2 sec
++ private static final int REFRESH_PERIOD = 2;
++ // Alphanumeric characters, plus percent (url-encoding), equals, ampersand, dot and hyphen
++ private static final Pattern ALPHA_NUMERIC = Pattern.compile("[a-zA-Z0-9%=&.\\-]*");
++
++ @Override
++ protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
++ throws ServletException, IOException {
++ if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(), req, resp)) {
++ resp.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
++ ProfileServlet.setResponseHeader(resp);
++ resp.getWriter().write("Unauthorized: Instrumentation access is not allowed!");
++ return;
++ }
++
++ String absoluteDiskPath = getServletContext().getRealPath(req.getPathInfo());
++ File requestedFile = new File(absoluteDiskPath);
++ // async-profiler version 1.4 writes 'Started [cpu] profiling' to output file when profiler is
++ // running which gets replaced by final output. If final output is not ready yet, the file size
++ // will be <100 bytes (in all modes).
++ if (requestedFile.length() < 100) {
++ LOG.info("{} is incomplete. Sending auto-refresh header.", requestedFile);
++ String refreshUrl = req.getRequestURI();
++ // Rebuild the query string (if we have one)
++ if (req.getQueryString() != null) {
++ refreshUrl += "?" + sanitize(req.getQueryString());
++ }
++ ProfileServlet.setResponseHeader(resp);
++ resp.setHeader("Refresh", REFRESH_PERIOD + ";" + refreshUrl);
++ resp.getWriter().write("This page will be auto-refreshed every " + REFRESH_PERIOD
++ + " seconds until the output file is ready. Redirecting to " + refreshUrl);
++ } else {
++ super.doGet(req, resp);
++ }
++ }
++
++ static String sanitize(String input) {
++ // Basic test to try to avoid any XSS attacks or HTML content showing up.
++ // Duplicates HtmlQuoting a little, but avoid destroying ampersand.
++ if (ALPHA_NUMERIC.matcher(input).matches()) {
++ return input;
++ }
++ throw new RuntimeException("Non-alphanumeric data found in input, aborting.");
++ }
++}
+diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileServlet.java
+new file mode 100644
+index 000000000000..3e19dcde35d7
+--- /dev/null
++++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfileServlet.java
+@@ -0,0 +1,394 @@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.http;
++
++import java.io.File;
++import java.io.IOException;
++import java.util.ArrayList;
++import java.util.List;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.atomic.AtomicInteger;
++import java.util.concurrent.locks.Lock;
++import java.util.concurrent.locks.ReentrantLock;
++import javax.servlet.http.HttpServlet;
++import javax.servlet.http.HttpServletRequest;
++import javax.servlet.http.HttpServletResponse;
++
++import com.google.common.base.Joiner;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++import org.apache.hadoop.classification.InterfaceAudience;
++import org.apache.hadoop.util.ProcessUtils;
++
++/**
++ * Servlet that runs async-profiler as web-endpoint.
++ *
++ * Following options from async-profiler can be specified as query parameter.
++ * // -e event profiling event: cpu|alloc|lock|cache-misses etc.
++ * // -d duration run profiling for 'duration' seconds (integer)
++ * // -i interval sampling interval in nanoseconds (long)
++ * // -j jstackdepth maximum Java stack depth (integer)
++ * // -b bufsize frame buffer size (long)
++ * // -t profile different threads separately
++ * // -s simple class names instead of FQN
++ * // -o fmt[,fmt...] output format: summary|traces|flat|collapsed|svg|tree|jfr|html
++ * // --width px SVG width pixels (integer)
++ * // --height px SVG frame height pixels (integer)
++ * // --minwidth px skip frames smaller than px (double)
++ * // --reverse generate stack-reversed FlameGraph / Call tree
++ *
++ * Example:
++ * If Namenode http address is localhost:9870, and ResourceManager http address is localhost:8088,
++ * ProfileServlet running with async-profiler setup can be accessed with
++ * http://localhost:9870/prof and http://localhost:8088/prof for Namenode and ResourceManager
++ * processes respectively.
++ * Deep dive into some params:
++ * - To collect 10 second CPU profile of current process i.e. Namenode (returns FlameGraph svg)
++ * curl "http://localhost:9870/prof"
++ * - To collect 10 second CPU profile of pid 12345 (returns FlameGraph svg)
++ * curl "http://localhost:9870/prof?pid=12345" (For instance, provide pid of Datanode)
++ * - To collect 30 second CPU profile of pid 12345 (returns FlameGraph svg)
++ * curl "http://localhost:9870/prof?pid=12345&duration=30"
++ * - To collect 1 minute CPU profile of current process and output in tree format (html)
++ * curl "http://localhost:9870/prof?output=tree&duration=60"
++ * - To collect 10 second heap allocation profile of current process (returns FlameGraph svg)
++ * curl "http://localhost:9870/prof?event=alloc"
++ * - To collect lock contention profile of current process (returns FlameGraph svg)
++ * curl "http://localhost:9870/prof?event=lock"
++ *
++ * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all events)
++ * // Perf events:
++ * // cpu
++ * // page-faults
++ * // context-switches
++ * // cycles
++ * // instructions
++ * // cache-references
++ * // cache-misses
++ * // branches
++ * // branch-misses
++ * // bus-cycles
++ * // L1-dcache-load-misses
++ * // LLC-load-misses
++ * // dTLB-load-misses
++ * // mem:breakpoint
++ * // trace:tracepoint
++ * // Java events:
++ * // alloc
++ * // lock
++ */
++@InterfaceAudience.Private
++public class ProfileServlet extends HttpServlet {
++
++ private static final long serialVersionUID = 1L;
++ private static final Logger LOG = LoggerFactory.getLogger(ProfileServlet.class);
++
++ static final String ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods";
++ static final String ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin";
++ private static final String ALLOWED_METHODS = "GET";
++ private static final String CONTENT_TYPE_TEXT = "text/plain; charset=utf-8";
++ private static final String ASYNC_PROFILER_HOME_ENV = "ASYNC_PROFILER_HOME";
++ private static final String ASYNC_PROFILER_HOME_SYSTEM_PROPERTY = "async.profiler.home";
++ private static final String PROFILER_SCRIPT = "/profiler.sh";
++ private static final int DEFAULT_DURATION_SECONDS = 10;
++ private static final AtomicInteger ID_GEN = new AtomicInteger(0);
++
++ static final String OUTPUT_DIR = System.getProperty("java.io.tmpdir") + "/prof-output-hadoop";
++
++ private enum Event {
++
++ CPU("cpu"),
++ ALLOC("alloc"),
++ LOCK("lock"),
++ PAGE_FAULTS("page-faults"),
++ CONTEXT_SWITCHES("context-switches"),
++ CYCLES("cycles"),
++ INSTRUCTIONS("instructions"),
++ CACHE_REFERENCES("cache-references"),
++ CACHE_MISSES("cache-misses"),
++ BRANCHES("branches"),
++ BRANCH_MISSES("branch-misses"),
++ BUS_CYCLES("bus-cycles"),
++ L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"),
++ LLC_LOAD_MISSES("LLC-load-misses"),
++ DTLB_LOAD_MISSES("dTLB-load-misses"),
++ MEM_BREAKPOINT("mem:breakpoint"),
++ TRACE_TRACEPOINT("trace:tracepoint");
++
++ private final String internalName;
++
++ Event(final String internalName) {
++ this.internalName = internalName;
++ }
++
++ public String getInternalName() {
++ return internalName;
++ }
++
++ public static Event fromInternalName(final String name) {
++ for (Event event : values()) {
++ if (event.getInternalName().equalsIgnoreCase(name)) {
++ return event;
++ }
++ }
++
++ return null;
++ }
++ }
++
++ private enum Output {
++ SUMMARY,
++ TRACES,
++ FLAT,
++ COLLAPSED,
++ // No SVG in 2.x asyncprofiler.
++ SVG,
++ TREE,
++ JFR,
++ // In 2.x asyncprofiler, this is how you get flamegraphs.
++ HTML
++ }
++
++ private final Lock profilerLock = new ReentrantLock();
++ private transient volatile Process process;
++ private final String asyncProfilerHome;
++ private Integer pid;
++
++ public ProfileServlet() {
++ this.asyncProfilerHome = getAsyncProfilerHome();
++ this.pid = ProcessUtils.getPid();
++ LOG.info("Servlet process PID: {} asyncProfilerHome: {}", pid, asyncProfilerHome);
++ }
++
++ @Override
++ protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
++ throws IOException {
++ if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(), req, resp)) {
++ resp.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
++ setResponseHeader(resp);
++ resp.getWriter().write("Unauthorized: Instrumentation access is not allowed!");
++ return;
++ }
++
++ // make sure async profiler home is set
++ if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) {
++ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
++ setResponseHeader(resp);
++ resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n"
++ + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n"
++ + "environment is properly configured.");
++ return;
++ }
++
++ // if pid is explicitly specified, use it else default to current process
++ pid = getInteger(req, "pid", pid);
++
++ // if pid is not specified in query param and if current process pid cannot be determined
++ if (pid == null) {
++ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
++ setResponseHeader(resp);
++ resp.getWriter().write(
++ "'pid' query parameter unspecified or unable to determine PID of current process.");
++ return;
++ }
++
++ final int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
++ final Output output = getOutput(req);
++ final Event event = getEvent(req);
++ final Long interval = getLong(req, "interval");
++ final Integer jstackDepth = getInteger(req, "jstackdepth", null);
++ final Long bufsize = getLong(req, "bufsize");
++ final boolean thread = req.getParameterMap().containsKey("thread");
++ final boolean simple = req.getParameterMap().containsKey("simple");
++ final Integer width = getInteger(req, "width", null);
++ final Integer height = getInteger(req, "height", null);
++ final Double minwidth = getMinWidth(req);
++ final boolean reverse = req.getParameterMap().containsKey("reverse");
++
++ if (process == null || !process.isAlive()) {
++ try {
++ int lockTimeoutSecs = 3;
++ if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) {
++ try {
++ File outputFile = new File(OUTPUT_DIR,
++ "async-prof-pid-" + pid + "-" + event.name().toLowerCase() + "-" + ID_GEN
++ .incrementAndGet() + "." + output.name().toLowerCase());
++ List cmd = new ArrayList<>();
++ cmd.add(asyncProfilerHome + PROFILER_SCRIPT);
++ cmd.add("-e");
++ cmd.add(event.getInternalName());
++ cmd.add("-d");
++ cmd.add("" + duration);
++ cmd.add("-o");
++ cmd.add(output.name().toLowerCase());
++ cmd.add("-f");
++ cmd.add(outputFile.getAbsolutePath());
++ if (interval != null) {
++ cmd.add("-i");
++ cmd.add(interval.toString());
++ }
++ if (jstackDepth != null) {
++ cmd.add("-j");
++ cmd.add(jstackDepth.toString());
++ }
++ if (bufsize != null) {
++ cmd.add("-b");
++ cmd.add(bufsize.toString());
++ }
++ if (thread) {
++ cmd.add("-t");
++ }
++ if (simple) {
++ cmd.add("-s");
++ }
++ if (width != null) {
++ cmd.add("--width");
++ cmd.add(width.toString());
++ }
++ if (height != null) {
++ cmd.add("--height");
++ cmd.add(height.toString());
++ }
++ if (minwidth != null) {
++ cmd.add("--minwidth");
++ cmd.add(minwidth.toString());
++ }
++ if (reverse) {
++ cmd.add("--reverse");
++ }
++ cmd.add(pid.toString());
++ process = ProcessUtils.runCmdAsync(cmd);
++
++ // set response and set refresh header to output location
++ setResponseHeader(resp);
++ resp.setStatus(HttpServletResponse.SC_ACCEPTED);
++ String relativeUrl = "/prof-output-hadoop/" + outputFile.getName();
++ resp.getWriter().write("Started [" + event.getInternalName()
++ + "] profiling. This page will automatically redirect to " + relativeUrl + " after "
++ + duration + " seconds. "
++ + "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async "
++ + "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler."
++ + "\n\nCommand:\n" + Joiner.on(" ").join(cmd));
++
++ // to avoid auto-refresh by ProfileOutputServlet, refreshDelay can be specified
++ // via url param
++ int refreshDelay = getInteger(req, "refreshDelay", 0);
++
++ // instead of sending redirect, set auto-refresh so that browsers will refresh
++ // with redirected url
++ resp.setHeader("Refresh", (duration + refreshDelay) + ";" + relativeUrl);
++ resp.getWriter().flush();
++ } finally {
++ profilerLock.unlock();
++ }
++ } else {
++ setResponseHeader(resp);
++ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
++ resp.getWriter()
++ .write("Unable to acquire lock. Another instance of profiler might be running.");
++ LOG.warn("Unable to acquire lock in {} seconds. Another instance of profiler might be"
++ + " running.", lockTimeoutSecs);
++ }
++ } catch (InterruptedException e) {
++ LOG.warn("Interrupted while acquiring profile lock.", e);
++ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
++ }
++ } else {
++ setResponseHeader(resp);
++ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
++ resp.getWriter().write("Another instance of profiler is already running.");
++ }
++ }
++
++ private Integer getInteger(final HttpServletRequest req, final String param,
++ final Integer defaultValue) {
++ final String value = req.getParameter(param);
++ if (value != null) {
++ try {
++ return Integer.valueOf(value);
++ } catch (NumberFormatException e) {
++ return defaultValue;
++ }
++ }
++ return defaultValue;
++ }
++
++ private Long getLong(final HttpServletRequest req, final String param) {
++ final String value = req.getParameter(param);
++ if (value != null) {
++ try {
++ return Long.valueOf(value);
++ } catch (NumberFormatException e) {
++ return null;
++ }
++ }
++ return null;
++ }
++
++ private Double getMinWidth(final HttpServletRequest req) {
++ final String value = req.getParameter("minwidth");
++ if (value != null) {
++ try {
++ return Double.valueOf(value);
++ } catch (NumberFormatException e) {
++ return null;
++ }
++ }
++ return null;
++ }
++
++ private Event getEvent(final HttpServletRequest req) {
++ final String eventArg = req.getParameter("event");
++ if (eventArg != null) {
++ Event event = Event.fromInternalName(eventArg);
++ return event == null ? Event.CPU : event;
++ }
++ return Event.CPU;
++ }
++
++ private Output getOutput(final HttpServletRequest req) {
++ final String outputArg = req.getParameter("output");
++ if (req.getParameter("output") != null) {
++ try {
++ return Output.valueOf(outputArg.trim().toUpperCase());
++ } catch (IllegalArgumentException e) {
++ return Output.HTML;
++ }
++ }
++ return Output.HTML;
++ }
++
++ static void setResponseHeader(final HttpServletResponse response) {
++ response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, ALLOWED_METHODS);
++ response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
++ response.setContentType(CONTENT_TYPE_TEXT);
++ }
++
++ static String getAsyncProfilerHome() {
++ String asyncProfilerHome = System.getenv(ASYNC_PROFILER_HOME_ENV);
++ // if ENV is not set, see if -Dasync.profiler.home=/path/to/async/profiler/home is set
++ if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) {
++ asyncProfilerHome = System.getProperty(ASYNC_PROFILER_HOME_SYSTEM_PROPERTY);
++ }
++
++ return asyncProfilerHome;
++ }
++
++}
+diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfilerDisabledServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfilerDisabledServlet.java
+new file mode 100644
+index 000000000000..459485ffa5b5
+--- /dev/null
++++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/ProfilerDisabledServlet.java
+@@ -0,0 +1,44 @@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.http;
++
++import java.io.IOException;
++import javax.servlet.http.HttpServlet;
++import javax.servlet.http.HttpServletRequest;
++import javax.servlet.http.HttpServletResponse;
++
++import org.apache.hadoop.classification.InterfaceAudience;
++
++/**
++ * Servlet for disabled async-profiler.
++ */
++@InterfaceAudience.Private
++public class ProfilerDisabledServlet extends HttpServlet {
++
++ @Override
++ protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
++ throws IOException {
++ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
++ ProfileServlet.setResponseHeader(resp);
++ resp.getWriter().write("The profiler servlet was disabled at startup.\n\n"
++ + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n"
++ + "environment is properly configured.");
++ }
++
++}
+diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProcessUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProcessUtils.java
+new file mode 100644
+index 000000000000..cf653b9c912c
+--- /dev/null
++++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProcessUtils.java
+@@ -0,0 +1,74 @@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.util;
++
++import java.io.IOException;
++import java.lang.management.ManagementFactory;
++import java.util.List;
++
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++import org.apache.hadoop.classification.InterfaceAudience;
++
++/**
++ * Process related utilities.
++ */
++@InterfaceAudience.Private
++public final class ProcessUtils {
++
++ private static final Logger LOG = LoggerFactory.getLogger(ProcessUtils.class);
++
++ private ProcessUtils() {
++ // no-op
++ }
++
++ public static Integer getPid() {
++ // JVM_PID can be exported in service start script
++ String pidStr = System.getenv("JVM_PID");
++
++ // In case if it is not set correctly, fallback to mxbean which is implementation specific.
++ if (pidStr == null || pidStr.trim().isEmpty()) {
++ String name = ManagementFactory.getRuntimeMXBean().getName();
++ if (name != null) {
++ int idx = name.indexOf("@");
++ if (idx != -1) {
++ pidStr = name.substring(0, name.indexOf("@"));
++ }
++ }
++ }
++ try {
++ if (pidStr != null) {
++ return Integer.valueOf(pidStr);
++ }
++ } catch (NumberFormatException ignored) {
++ // ignore
++ }
++ return null;
++ }
++
++ public static Process runCmdAsync(List cmd) {
++ try {
++ LOG.info("Running command async: {}", cmd);
++ return new ProcessBuilder(cmd).inheritIO().start();
++ } catch (IOException e) {
++ throw new IllegalStateException(e);
++ }
++ }
++}
+diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+index 5296e882df55..b1ab3e390793 100644
+--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
++++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+@@ -69,7 +69,7 @@
+ false
+
+ Indicates if administrator ACLs are required to access
+- instrumentation servlets (JMX, METRICS, CONF, STACKS).
++ instrumentation servlets (JMX, METRICS, CONF, STACKS, PROF).
+
+
+
+diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/AsyncProfilerServlet.md b/hadoop-common-project/hadoop-common/src/site/markdown/AsyncProfilerServlet.md
+new file mode 100644
+index 000000000000..4b93cc219a5e
+--- /dev/null
++++ b/hadoop-common-project/hadoop-common/src/site/markdown/AsyncProfilerServlet.md
+@@ -0,0 +1,145 @@
++
++
++Async Profiler Servlet for Hadoop
++========================================
++
++
++
++Purpose
++-------
++
++This document describes how to configure and use async profiler
++with Hadoop applications.
++Async profiler is a low overhead sampling profiler for Java that
++does not suffer from Safepoint bias problem. It features
++HotSpot-specific APIs to collect stack traces and to track memory
++allocations. The profiler works with OpenJDK, Oracle JDK and other
++Java runtimes based on the HotSpot JVM.
++
++Hadoop profiler servlet supports Async Profiler major versions
++1.x and 2.x.
++
++Prerequisites
++-------------
++
++Make sure Hadoop is installed, configured and setup correctly.
++For more information see:
++
++* [Single Node Setup](./SingleCluster.html) for first-time users.
++* [Cluster Setup](./ClusterSetup.html) for large, distributed clusters.
++
++Go to https://github.com/jvm-profiling-tools/async-profiler,
++download a release appropriate for your platform, and install
++on every cluster host.
++
++Set `ASYNC_PROFILER_HOME` in the environment (put it in hadoop-env.sh)
++to the root directory of the async-profiler install location, or pass
++it on the Hadoop daemon's command line as a system property as
++`-Dasync.profiler.home=/path/to/async-profiler`.
++
++
++Usage
++--------
++
++Once the prerequisites have been satisfied, access to the async-profiler
++is available by using Namenode or ResourceManager UI.
++
++Following options from async-profiler can be specified as query parameter.
++* `-e event` profiling event: cpu|alloc|lock|cache-misses etc.
++* `-d duration` run profiling for 'duration' seconds (integer)
++* `-i interval` sampling interval in nanoseconds (long)
++* `-j jstackdepth` maximum Java stack depth (integer)
++* `-b bufsize` frame buffer size (long)
++* `-t` profile different threads separately
++* `-s` simple class names instead of FQN
++* `-o fmt[,fmt...]` output format: summary|traces|flat|collapsed|svg|tree|jfr|html
++* `--width px` SVG width pixels (integer)
++* `--height px` SVG frame height pixels (integer)
++* `--minwidth px` skip frames smaller than px (double)
++* `--reverse` generate stack-reversed FlameGraph / Call tree
++
++
++Example:
++If Namenode http address is localhost:9870, and ResourceManager http
++address is localhost:8088, ProfileServlet running with async-profiler
++setup can be accessed with http://localhost:9870/prof and
++http://localhost:8088/prof for Namenode and ResourceManager processes
++respectively.
++
++Diving deep into some params:
++
++* To collect 10 second CPU profile of current process
++ (returns FlameGraph svg)
++ * `curl http://localhost:9870/prof` (FlameGraph svg for Namenode)
++ * `curl http://localhost:8088/prof` (FlameGraph svg for ResourceManager)
++* To collect 10 second CPU profile of pid 12345 (returns FlameGraph svg)
++ * `curl http://localhost:9870/prof?pid=12345` (For instance, provide
++ pid of Datanode here)
++* To collect 30 second CPU profile of pid 12345 (returns FlameGraph svg)
++ * `curl http://localhost:9870/prof?pid=12345&duration=30`
++* To collect 1 minute CPU profile of current process and output in tree
++ format (html)
++ * `curl http://localhost:9870/prof?output=tree&duration=60`
++* To collect 10 second heap allocation profile of current process
++ (returns FlameGraph svg)
++ * `curl http://localhost:9870/prof?event=alloc`
++* To collect lock contention profile of current process
++ (returns FlameGraph svg)
++ * `curl http://localhost:9870/prof?event=lock`
++
++
++The following event types are supported by async-profiler.
++Use the 'event' parameter to specify. Default is 'cpu'.
++Not all operating systems will support all types.
++
++Perf events:
++
++* cpu
++* page-faults
++* context-switches
++* cycles
++* instructions
++* cache-references
++* cache-misses
++* branches
++* branch-misses
++* bus-cycles
++* L1-dcache-load-misses
++* LLC-load-misses
++* dTLB-load-misses
++
++Java events:
++
++* alloc
++* lock
++
++The following output formats are supported.
++Use the 'output' parameter to specify. Default is 'flamegraph'.
++
++Output formats:
++
++* summary: A dump of basic profiling statistics.
++* traces: Call traces.
++* flat: Flat profile (top N hot methods).
++* collapsed: Collapsed call traces in the format used by FlameGraph
++ script. This is a collection of call stacks, where each line is a
++ semicolon separated list of frames followed by a counter.
++* svg: FlameGraph in SVG format.
++* tree: Call tree in HTML format.
++* jfr: Call traces in Java Flight Recorder format.
++
++The 'duration' parameter specifies how long to collect trace data
++before generating output, specified in seconds. The default is 10 seconds.
++
+diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestDisabledProfileServlet.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestDisabledProfileServlet.java
+new file mode 100644
+index 000000000000..ce068bb6f1cf
+--- /dev/null
++++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestDisabledProfileServlet.java
+@@ -0,0 +1,95 @@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.http;
++
++import java.io.IOException;
++import java.net.HttpURLConnection;
++import java.net.URL;
++import javax.servlet.http.HttpServletResponse;
++
++import org.junit.AfterClass;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++/**
++ * Small test to cover default disabled prof endpoint.
++ */
++public class TestDisabledProfileServlet extends HttpServerFunctionalTest {
++
++ private static HttpServer2 server;
++ private static URL baseUrl;
++
++ @BeforeClass
++ public static void setup() throws Exception {
++ server = createTestServer();
++ server.start();
++ baseUrl = getServerURL(server);
++ }
++
++ @AfterClass
++ public static void cleanup() throws Exception {
++ server.stop();
++ }
++
++ @Test
++ public void testQuery() throws Exception {
++ try {
++ readOutput(new URL(baseUrl, "/prof"));
++ throw new IllegalStateException("Should not reach here");
++ } catch (IOException e) {
++ assertTrue(e.getMessage()
++ .contains(HttpServletResponse.SC_INTERNAL_SERVER_ERROR + " for URL: " + baseUrl));
++ }
++
++ // CORS headers
++ HttpURLConnection conn =
++ (HttpURLConnection) new URL(baseUrl, "/prof").openConnection();
++ assertEquals("GET", conn.getHeaderField(ProfileServlet.ACCESS_CONTROL_ALLOW_METHODS));
++ assertNotNull(conn.getHeaderField(ProfileServlet.ACCESS_CONTROL_ALLOW_ORIGIN));
++ conn.disconnect();
++ }
++
++ @Test
++ public void testRequestMethods() throws IOException {
++ HttpURLConnection connection = getConnection("PUT");
++ assertEquals("Unexpected response code", HttpServletResponse.SC_METHOD_NOT_ALLOWED,
++ connection.getResponseCode());
++ connection.disconnect();
++ connection = getConnection("POST");
++ assertEquals("Unexpected response code", HttpServletResponse.SC_METHOD_NOT_ALLOWED,
++ connection.getResponseCode());
++ connection.disconnect();
++ connection = getConnection("DELETE");
++ assertEquals("Unexpected response code", HttpServletResponse.SC_METHOD_NOT_ALLOWED,
++ connection.getResponseCode());
++ connection.disconnect();
++ connection = getConnection("GET");
++ assertEquals("Unexpected response code", HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
++ connection.getResponseCode());
++ connection.disconnect();
++ }
++
++ private HttpURLConnection getConnection(final String method) throws IOException {
++ URL url = new URL(baseUrl, "/prof");
++ HttpURLConnection conn = (HttpURLConnection) url.openConnection();
++ conn.setRequestMethod(method);
++ return conn;
++ }
++
++}
+diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+index 0686e788fe0d..a06ccdd25eb2 100644
+--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
++++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+@@ -1205,9 +1205,10 @@ Name | Description
+ /logs | Display log files
+ /stacks | Display JVM stacks
+ /static/index.html | The static home page
++/prof | Async Profiler endpoint
+
+ To control the access to servlet `/conf`, `/jmx`, `/logLevel`, `/logs`,
+-and `/stacks`, configure the following properties in `kms-site.xml`:
++`/stacks` and `/prof`, configure the following properties in `kms-site.xml`:
+
+ ```xml
+
+@@ -1221,7 +1222,7 @@ and `/stacks`, configure the following properties in `kms-site.xml`:
+ true
+
+ Indicates if administrator ACLs are required to access
+- instrumentation servlets (JMX, METRICS, CONF, STACKS).
++ instrumentation servlets (JMX, METRICS, CONF, STACKS, PROF).
+
+
+
+diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
+index 2d0a5b8cd2e7..66f74d13d25a 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
++++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/ServerSetup.md.vm
+@@ -162,9 +162,10 @@ Name | Description
+ /logs | Display log files
+ /stacks | Display JVM stacks
+ /static/index.html | The static home page
++/prof | Async Profiler endpoint
+
+ To control the access to servlet `/conf`, `/jmx`, `/logLevel`, `/logs`,
+-and `/stacks`, configure the following properties in `httpfs-site.xml`:
++`/stacks` and `/prof`, configure the following properties in `httpfs-site.xml`:
+
+ ```xml
+
+@@ -178,7 +179,7 @@ and `/stacks`, configure the following properties in `httpfs-site.xml`:
+ true
+
+ Indicates if administrator ACLs are required to access
+- instrumentation servlets (JMX, METRICS, CONF, STACKS).
++ instrumentation servlets (JMX, METRICS, CONF, STACKS, PROF).
+
+
+
+diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
+index 5e0cf66b4f92..f74a5dd4f7b5 100644
+--- a/hadoop-project/src/site/site.xml
++++ b/hadoop-project/src/site/site.xml
+@@ -72,6 +72,7 @@
+
+
+
++
+
+
+
+
+
+
+
++ * - -e event profiling event: cpu|alloc|lock|cache-misses etc.
++ * - -d duration run profiling for 'duration' seconds (integer), default 10s
++ * - -i interval sampling interval in nanoseconds (long), default 10ms
++ * - -j jstackdepth maximum Java stack depth (integer), default 2048
++ * - -t profile different threads separately
++ * - -s simple class names instead of FQN
++ * - -g print method signatures
++ * - -a annotate Java methods
++ * - -l prepend library names
++ * - -o fmt output format: flat|traces|collapsed|flamegraph|tree|jfr
++ * - --minwidth pct skip frames smaller than pct% (double)
++ * - --reverse generate stack-reversed FlameGraph / Call tree
++ *
+ * Example:
+- * - To collect 30 second CPU profile of current process (returns FlameGraph svg)
+- * curl "http://localhost:10002/prof"
+- * - To collect 1 minute CPU profile of current process and output in tree format (html)
+- * curl "http://localhost:10002/prof?output=tree&duration=60"
+- * - To collect 30 second heap allocation profile of current process (returns FlameGraph svg)
+- * curl "http://localhost:10002/prof?event=alloc"
+- * - To collect lock contention profile of current process (returns FlameGraph svg)
+- * curl "http://localhost:10002/prof?event=lock"
+- * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all events)
+- * // Perf events:
+- * // cpu
+- * // page-faults
+- * // context-switches
+- * // cycles
+- * // instructions
+- * // cache-references
+- * // cache-misses
+- * // branches
+- * // branch-misses
+- * // bus-cycles
+- * // L1-dcache-load-misses
+- * // LLC-load-misses
+- * // dTLB-load-misses
+- * // mem:breakpoint
+- * // trace:tracepoint
+- * // Java events:
+- * // alloc
+- * // lock
++ *
++ * - To collect 30 second CPU profile of current process (returns FlameGraph svg):
++ * {@code curl http://localhost:10002/prof"}
++ * - To collect 1 minute CPU profile of current process and output in tree format (html)
++ * {@code curl "http://localhost:10002/prof?output=tree&duration=60"}
++ * - To collect 30 second heap allocation profile of current process (returns FlameGraph):
++ * {@code curl "http://localhost:10002/prof?event=alloc"}
++ * - To collect lock contention profile of current process (returns FlameGraph):
++ * {@code curl "http://localhost:10002/prof?event=lock"}
++ *
++ * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all
++ * events).
++ * Basic events:
++ *
++ * - cpu
++ * - alloc
++ * - lock
++ * - wall
++ * - itimer
++ *
++ * Perf events:
++ *
++ * - L1-dcache-load-misses
++ * - LLC-load-misses
++ * - branch-instructions
++ * - branch-misses
++ * - bus-cycles
++ * - cache-misses
++ * - cache-references
++ * - context-switches
++ * - cpu
++ * - cycles
++ * - dTLB-load-misses
++ * - instructions
++ * - mem:breakpoint
++ * - page-faults
++ * - trace:tracepoint
++ *
+ */
+ @InterfaceAudience.Private
+ public class ProfileServlet extends HttpServlet {
+@@ -104,19 +113,21 @@
+ CPU("cpu"),
+ ALLOC("alloc"),
+ LOCK("lock"),
+- PAGE_FAULTS("page-faults"),
++ WALL("wall"),
++ ITIMER("itimer"),
++ BRANCH_INSTRUCTIONS("branch-instructions"),
++ BRANCH_MISSES("branch-misses"),
++ BUS_CYCLES("bus-cycles"),
++ CACHE_MISSES("cache-misses"),
++ CACHE_REFERENCES("cache-references"),
+ CONTEXT_SWITCHES("context-switches"),
+ CYCLES("cycles"),
++ DTLB_LOAD_MISSES("dTLB-load-misses"),
+ INSTRUCTIONS("instructions"),
+- CACHE_REFERENCES("cache-references"),
+- CACHE_MISSES("cache-misses"),
+- BRANCHES("branches"),
+- BRANCH_MISSES("branch-misses"),
+- BUS_CYCLES("bus-cycles"),
+ L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"),
+ LLC_LOAD_MISSES("LLC-load-misses"),
+- DTLB_LOAD_MISSES("dTLB-load-misses"),
+ MEM_BREAKPOINT("mem:breakpoint"),
++ PAGE_FAULTS("page-faults"),
+ TRACE_TRACEPOINT("trace:tracepoint"),;
+
+ private final String internalName;
+@@ -125,11 +136,11 @@
+ this.internalName = internalName;
+ }
+
+- public String getInternalName() {
++ String getInternalName() {
+ return internalName;
+ }
+
+- public static Event fromInternalName(final String name) {
++ static Event fromInternalName(final String name) {
+ for (Event event : values()) {
+ if (event.getInternalName().equalsIgnoreCase(name)) {
+ return event;
+@@ -140,35 +151,31 @@
+ }
+ }
+
+- enum Output {
+- SUMMARY,
+- TRACES,
++ private enum Output {
++ COLLAPSED,
++ FLAMEGRAPH,
+ FLAT,
+- COLLAPSED,
+- // No SVG in 2.x asyncprofiler.
+- SVG,
+- TREE,
+ JFR,
+- // In 2.x asyncprofiler, this is how you get flamegraphs.
+- HTML
++ TRACES,
++ TREE
+ }
+
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SE_TRANSIENT_FIELD_NOT_RESTORED",
+- justification = "This class is never serialized nor restored.")
+- private transient Lock profilerLock = new ReentrantLock();
++ justification = "This class is never serialized nor restored.")
++ private final transient Lock profilerLock = new ReentrantLock();
+ private transient volatile Process process;
+- private String asyncProfilerHome;
++ private final String asyncProfilerHome;
+ private Integer pid;
+
+ public ProfileServlet() {
+ this.asyncProfilerHome = getAsyncProfilerHome();
+ this.pid = ProcessUtils.getPid();
+- LOG.info("Servlet process PID: " + pid + " asyncProfilerHome: " + asyncProfilerHome);
++ LOG.info("Servlet process PID: {} asyncProfilerHome: {}", pid, asyncProfilerHome);
+ }
+
+ @Override
+ protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
+- throws IOException {
++ throws IOException {
+ if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), req, resp)) {
+ resp.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+ setResponseHeader(resp);
+@@ -180,10 +187,11 @@
+ if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) {
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ setResponseHeader(resp);
+- resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n" +
+- "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" +
+- "environment is properly configured. For more information please see\n" +
+- "http://hbase.apache.org/book.html#profiler\n");
++ resp.getWriter()
++ .write("ASYNC_PROFILER_HOME env is not set.\n\n"
++ + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n"
++ + "environment is properly configured. For more information please see\n"
++ + "https://hbase.apache.org/book.html#profiler\n");
+ return;
+ }
+
+@@ -194,42 +202,39 @@
+ if (pid == null) {
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ setResponseHeader(resp);
+- resp.getWriter().write(
+- "'pid' query parameter unspecified or unable to determine PID of current process.");
++ resp.getWriter()
++ .write("'pid' query parameter unspecified or unable to determine PID of current process.");
+ return;
+ }
+
+- final int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
+- final Output output = getOutput(req);
+- final Event event = getEvent(req);
+- final Long interval = getLong(req, "interval");
+- final Integer jstackDepth = getInteger(req, "jstackdepth", null);
+- final Long bufsize = getLong(req, "bufsize");
+- final boolean thread = req.getParameterMap().containsKey("thread");
+- final boolean simple = req.getParameterMap().containsKey("simple");
+- final Integer width = getInteger(req, "width", null);
+- final Integer height = getInteger(req, "height", null);
+- final Double minwidth = getMinWidth(req);
+- final boolean reverse = req.getParameterMap().containsKey("reverse");
++ Event event = getEvent(req);
++ int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
++ Long interval = getLong(req, "interval");
++ Integer jstackDepth = getInteger(req, "jstackdepth", null);
++ boolean thread = req.getParameterMap().containsKey("thread");
++ boolean simple = req.getParameterMap().containsKey("simple");
++ boolean signature = req.getParameterMap().containsKey("signature");
++ boolean annotate = req.getParameterMap().containsKey("annotate");
++ boolean prependLib = req.getParameterMap().containsKey("prependlib");
++ Output output = getOutput(req);
++ Double minwidth = getMinWidth(req);
++ boolean reverse = req.getParameterMap().containsKey("reverse");
+
+ if (process == null || !process.isAlive()) {
+ try {
+ int lockTimeoutSecs = 3;
+ if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) {
+ try {
+- File outputFile = new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" +
+- event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet() + "." +
+- output.name().toLowerCase());
++ File outputFile =
++ new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" + event.name().toLowerCase() + "-"
++ + ID_GEN.incrementAndGet() + "." + output.name().toLowerCase());
++
+ List cmd = new ArrayList<>();
+ cmd.add(asyncProfilerHome + PROFILER_SCRIPT);
+ cmd.add("-e");
+ cmd.add(event.getInternalName());
+ cmd.add("-d");
+- cmd.add("" + duration);
+- cmd.add("-o");
+- cmd.add(output.name().toLowerCase());
+- cmd.add("-f");
+- cmd.add(outputFile.getAbsolutePath());
++ cmd.add(String.valueOf(duration));
+ if (interval != null) {
+ cmd.add("-i");
+ cmd.add(interval.toString());
+@@ -238,24 +243,25 @@
+ cmd.add("-j");
+ cmd.add(jstackDepth.toString());
+ }
+- if (bufsize != null) {
+- cmd.add("-b");
+- cmd.add(bufsize.toString());
+- }
+ if (thread) {
+ cmd.add("-t");
+ }
+ if (simple) {
+ cmd.add("-s");
+ }
+- if (width != null) {
+- cmd.add("--width");
+- cmd.add(width.toString());
++ if (signature) {
++ cmd.add("-g");
+ }
+- if (height != null) {
+- cmd.add("--height");
+- cmd.add(height.toString());
++ if (annotate) {
++ cmd.add("-a");
+ }
++ if (prependLib) {
++ cmd.add("-l");
++ }
++ cmd.add("-o");
++ cmd.add(output.name().toLowerCase());
++ cmd.add("-f");
++ cmd.add(outputFile.getAbsolutePath());
+ if (minwidth != null) {
+ cmd.add("--minwidth");
+ cmd.add(minwidth.toString());
+@@ -263,6 +269,7 @@
+ if (reverse) {
+ cmd.add("--reverse");
+ }
++
+ cmd.add(pid.toString());
+ process = ProcessUtils.runCmdAsync(cmd);
+
+@@ -270,11 +277,13 @@
+ setResponseHeader(resp);
+ resp.setStatus(HttpServletResponse.SC_ACCEPTED);
+ String relativeUrl = "/prof-output-hbase/" + outputFile.getName();
+- resp.getWriter().write(
+- "Started [" + event.getInternalName() +
+- "] profiling. This page will automatically redirect to " +
+- relativeUrl + " after " + duration + " seconds.\n\nCommand:\n" +
+- Joiner.on(" ").join(cmd));
++ resp.getWriter()
++ .write("Started [" + event.getInternalName()
++ + "] profiling. This page will automatically redirect to " + relativeUrl + " after "
++ + duration + " seconds. "
++ + "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async "
++ + "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler."
++ + "\n\nCommand:\n" + Joiner.on(" ").join(cmd));
+
+ // to avoid auto-refresh by ProfileOutputServlet, refreshDelay can be specified
+ // via url param
+@@ -290,10 +299,11 @@
+ } else {
+ setResponseHeader(resp);
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+- resp.getWriter().write(
+- "Unable to acquire lock. Another instance of profiler might be running.");
+- LOG.warn("Unable to acquire lock in " + lockTimeoutSecs +
+- " seconds. Another instance of profiler might be running.");
++ resp.getWriter()
++ .write("Unable to acquire lock. Another instance of profiler might be running.");
++ LOG.warn(
++ "Unable to acquire lock in {} seconds. Another instance of profiler might be running.",
++ lockTimeoutSecs);
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Interrupted while acquiring profile lock.", e);
+@@ -306,9 +316,9 @@
+ }
+ }
+
+- private Integer getInteger(final HttpServletRequest req, final String param,
+- final Integer defaultValue) {
+- final String value = req.getParameter(param);
++ private static Integer getInteger(final HttpServletRequest req, final String param,
++ final Integer defaultValue) {
++ String value = req.getParameter(param);
+ if (value != null) {
+ try {
+ return Integer.valueOf(value);
+@@ -319,8 +329,8 @@
+ return defaultValue;
+ }
+
+- private Long getLong(final HttpServletRequest req, final String param) {
+- final String value = req.getParameter(param);
++ private static Long getLong(final HttpServletRequest req, final String param) {
++ String value = req.getParameter(param);
+ if (value != null) {
+ try {
+ return Long.valueOf(value);
+@@ -331,8 +341,8 @@
+ return null;
+ }
+
+- private Double getMinWidth(final HttpServletRequest req) {
+- final String value = req.getParameter("minwidth");
++ private static Double getMinWidth(final HttpServletRequest req) {
++ String value = req.getParameter("minwidth");
+ if (value != null) {
+ try {
+ return Double.valueOf(value);
+@@ -343,8 +353,8 @@
+ return null;
+ }
+
+- private Event getEvent(final HttpServletRequest req) {
+- final String eventArg = req.getParameter("event");
++ private static Event getEvent(final HttpServletRequest req) {
++ String eventArg = req.getParameter("event");
+ if (eventArg != null) {
+ Event event = Event.fromInternalName(eventArg);
+ return event == null ? Event.CPU : event;
+@@ -352,16 +362,16 @@
+ return Event.CPU;
+ }
+
+- private Output getOutput(final HttpServletRequest req) {
+- final String outputArg = req.getParameter("output");
++ private static Output getOutput(final HttpServletRequest req) {
++ String outputArg = req.getParameter("output");
+ if (req.getParameter("output") != null) {
+ try {
+ return Output.valueOf(outputArg.trim().toUpperCase());
+ } catch (IllegalArgumentException e) {
+- return Output.SVG;
++ return Output.FLAMEGRAPH;
+ }
+ }
+- return Output.SVG;
++ return Output.FLAMEGRAPH;
+ }
+
+ static void setResponseHeader(final HttpServletResponse response) {
+@@ -386,14 +396,14 @@
+
+ @Override
+ protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
+- throws IOException {
++ throws IOException {
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ setResponseHeader(resp);
+- resp.getWriter().write("The profiler servlet was disabled at startup.\n\n" +
+- "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" +
+- "environment is properly configured. For more information please see\n" +
+- "http://hbase.apache.org/book.html#profiler\n");
+- return;
++ resp.getWriter()
++ .write("The profiler servlet was disabled at startup.\n\n"
++ + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n"
++ + "environment is properly configured. For more information please see\n"
++ + "https://hbase.apache.org/book.html#profiler\n");
+ }
+
+ }
diff --git a/hbase/stackable/patches/2.4.17/005-HBASE-28242-2.4.17.patch b/hbase/stackable/patches/2.4.17/005-HBASE-28242-2.4.17.patch
new file mode 100644
index 000000000..b0230cb78
--- /dev/null
+++ b/hbase/stackable/patches/2.4.17/005-HBASE-28242-2.4.17.patch
@@ -0,0 +1,376 @@
+diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java
+index 4e30484384..cc67974759 100644
+--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java
++++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java
+@@ -25,9 +25,11 @@ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicInteger;
+ import java.util.concurrent.locks.Lock;
+ import java.util.concurrent.locks.ReentrantLock;
++
+ import javax.servlet.http.HttpServlet;
+ import javax.servlet.http.HttpServletRequest;
+ import javax.servlet.http.HttpServletResponse;
++
+ import org.apache.hadoop.hbase.util.ProcessUtils;
+ import org.apache.yetus.audience.InterfaceAudience;
+ import org.slf4j.Logger;
+@@ -37,23 +39,60 @@ import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
+
+ /**
+ * Servlet that runs async-profiler as web-endpoint. Following options from async-profiler can be
+- * specified as query paramater. // -e event profiling event: cpu|alloc|lock|cache-misses etc. // -d
+- * duration run profiling for 'duration' seconds (integer) // -i interval sampling interval in
+- * nanoseconds (long) // -j jstackdepth maximum Java stack depth (integer) // -b bufsize frame
+- * buffer size (long) // -t profile different threads separately // -s simple class names instead of
+- * FQN // -o fmt[,fmt...] output format: summary|traces|flat|collapsed|svg|tree|jfr|html // --width
+- * px SVG width pixels (integer) // --height px SVG frame height pixels (integer) // --minwidth px
+- * skip frames smaller than px (double) // --reverse generate stack-reversed FlameGraph / Call tree
+- * Example: - To collect 30 second CPU profile of current process (returns FlameGraph svg) curl
+- * "http://localhost:10002/prof" - To collect 1 minute CPU profile of current process and output in
+- * tree format (html) curl "http://localhost:10002/prof?output=tree&duration=60" - To collect 30
+- * second heap allocation profile of current process (returns FlameGraph svg) curl
+- * "http://localhost:10002/prof?event=alloc" - To collect lock contention profile of current process
+- * (returns FlameGraph svg) curl "http://localhost:10002/prof?event=lock" Following event types are
+- * supported (default is 'cpu') (NOTE: not all OS'es support all events) // Perf events: // cpu //
+- * page-faults // context-switches // cycles // instructions // cache-references // cache-misses //
+- * branches // branch-misses // bus-cycles // L1-dcache-load-misses // LLC-load-misses //
+- * dTLB-load-misses // mem:breakpoint // trace:tracepoint // Java events: // alloc // lock
++ * specified as query parameter.
++ *
++ * - -e event profiling event: cpu|alloc|lock|cache-misses etc.
++ * - -d duration run profiling for 'duration' seconds (integer), default 10s
++ * - -i interval sampling interval in nanoseconds (long), default 10ms
++ * - -j jstackdepth maximum Java stack depth (integer), default 2048
++ * - -t profile different threads separately
++ * - -s simple class names instead of FQN
++ * - -g print method signatures
++ * - -a annotate Java methods
++ * - -l prepend library names
++ * - -o fmt output format: flat|traces|collapsed|flamegraph|tree|jfr
++ * - --minwidth pct skip frames smaller than pct% (double)
++ * - --reverse generate stack-reversed FlameGraph / Call tree
++ *
++ * Example:
++ *
++ * - To collect 30 second CPU profile of current process (returns FlameGraph svg):
++ * {@code curl "http://localhost:10002/prof"}
++ * - To collect 1 minute CPU profile of current process and output in tree format (html)
++ * {@code curl "http://localhost:10002/prof?output=tree&duration=60"}
++ * - To collect 30 second heap allocation profile of current process (returns FlameGraph):
++ * {@code curl "http://localhost:10002/prof?event=alloc"}
++ * - To collect lock contention profile of current process (returns FlameGraph):
++ * {@code curl "http://localhost:10002/prof?event=lock"}
++ *
++ * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all
++ * events).
++ * Basic events:
++ *
++ * - cpu
++ * - alloc
++ * - lock
++ * - wall
++ * - itimer
++ *
++ * Perf events:
++ *
++ * - L1-dcache-load-misses
++ * - LLC-load-misses
++ * - branch-instructions
++ * - branch-misses
++ * - bus-cycles
++ * - cache-misses
++ * - cache-references
++ * - context-switches
++ * - cpu
++ * - cycles
++ * - dTLB-load-misses
++ * - instructions
++ * - mem:breakpoint
++ * - page-faults
++ * - trace:tracepoint
++ *
+ */
+ @InterfaceAudience.Private
+ public class ProfileServlet extends HttpServlet {
+@@ -76,19 +115,21 @@ public class ProfileServlet extends HttpServlet {
+ CPU("cpu"),
+ ALLOC("alloc"),
+ LOCK("lock"),
+- PAGE_FAULTS("page-faults"),
++ WALL("wall"),
++ ITIMER("itimer"),
++ BRANCH_INSTRUCTIONS("branch-instructions"),
++ BRANCH_MISSES("branch-misses"),
++ BUS_CYCLES("bus-cycles"),
++ CACHE_MISSES("cache-misses"),
++ CACHE_REFERENCES("cache-references"),
+ CONTEXT_SWITCHES("context-switches"),
+ CYCLES("cycles"),
++ DTLB_LOAD_MISSES("dTLB-load-misses"),
+ INSTRUCTIONS("instructions"),
+- CACHE_REFERENCES("cache-references"),
+- CACHE_MISSES("cache-misses"),
+- BRANCHES("branches"),
+- BRANCH_MISSES("branch-misses"),
+- BUS_CYCLES("bus-cycles"),
+ L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"),
+ LLC_LOAD_MISSES("LLC-load-misses"),
+- DTLB_LOAD_MISSES("dTLB-load-misses"),
+ MEM_BREAKPOINT("mem:breakpoint"),
++ PAGE_FAULTS("page-faults"),
+ TRACE_TRACEPOINT("trace:tracepoint"),;
+
+ private final String internalName;
+@@ -97,11 +138,11 @@ public class ProfileServlet extends HttpServlet {
+ this.internalName = internalName;
+ }
+
+- public String getInternalName() {
++ String getInternalName() {
+ return internalName;
+ }
+
+- public static Event fromInternalName(final String name) {
++ static Event fromInternalName(final String name) {
+ for (Event event : values()) {
+ if (event.getInternalName().equalsIgnoreCase(name)) {
+ return event;
+@@ -112,30 +153,26 @@ public class ProfileServlet extends HttpServlet {
+ }
+ }
+
+- enum Output {
+- SUMMARY,
+- TRACES,
+- FLAT,
++ private enum Output {
+ COLLAPSED,
+- // No SVG in 2.x asyncprofiler.
+- SVG,
+- TREE,
++ FLAMEGRAPH,
++ FLAT,
+ JFR,
+- // In 2.x asyncprofiler, this is how you get flamegraphs.
+- HTML
++ TRACES,
++ TREE
+ }
+
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SE_TRANSIENT_FIELD_NOT_RESTORED",
+ justification = "This class is never serialized nor restored.")
+- private transient Lock profilerLock = new ReentrantLock();
++ private final transient Lock profilerLock = new ReentrantLock();
+ private transient volatile Process process;
+- private String asyncProfilerHome;
++ private final String asyncProfilerHome;
+ private Integer pid;
+
+ public ProfileServlet() {
+ this.asyncProfilerHome = getAsyncProfilerHome();
+ this.pid = ProcessUtils.getPid();
+- LOG.info("Servlet process PID: " + pid + " asyncProfilerHome: " + asyncProfilerHome);
++ LOG.info("Servlet process PID: {} asyncProfilerHome: {}", pid, asyncProfilerHome);
+ }
+
+ @Override
+@@ -154,9 +191,9 @@ public class ProfileServlet extends HttpServlet {
+ setResponseHeader(resp);
+ resp.getWriter()
+ .write("ASYNC_PROFILER_HOME env is not set.\n\n"
+- + "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n"
++ + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n"
+ + "environment is properly configured. For more information please see\n"
+- + "http://hbase.apache.org/book.html#profiler\n");
++ + "https://hbase.apache.org/book.html#profiler\n");
+ return;
+ }
+
+@@ -172,18 +209,18 @@ public class ProfileServlet extends HttpServlet {
+ return;
+ }
+
+- final int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
+- final Output output = getOutput(req);
+- final Event event = getEvent(req);
+- final Long interval = getLong(req, "interval");
+- final Integer jstackDepth = getInteger(req, "jstackdepth", null);
+- final Long bufsize = getLong(req, "bufsize");
+- final boolean thread = req.getParameterMap().containsKey("thread");
+- final boolean simple = req.getParameterMap().containsKey("simple");
+- final Integer width = getInteger(req, "width", null);
+- final Integer height = getInteger(req, "height", null);
+- final Double minwidth = getMinWidth(req);
+- final boolean reverse = req.getParameterMap().containsKey("reverse");
++ Event event = getEvent(req);
++ int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
++ Long interval = getLong(req, "interval");
++ Integer jstackDepth = getInteger(req, "jstackdepth", null);
++ boolean thread = req.getParameterMap().containsKey("thread");
++ boolean simple = req.getParameterMap().containsKey("simple");
++ boolean signature = req.getParameterMap().containsKey("signature");
++ boolean annotate = req.getParameterMap().containsKey("annotate");
++ boolean prependLib = req.getParameterMap().containsKey("prependlib");
++ Output output = getOutput(req);
++ Double minwidth = getMinWidth(req);
++ boolean reverse = req.getParameterMap().containsKey("reverse");
+
+ if (process == null || !process.isAlive()) {
+ try {
+@@ -198,11 +235,7 @@ public class ProfileServlet extends HttpServlet {
+ cmd.add("-e");
+ cmd.add(event.getInternalName());
+ cmd.add("-d");
+- cmd.add("" + duration);
+- cmd.add("-o");
+- cmd.add(output.name().toLowerCase());
+- cmd.add("-f");
+- cmd.add(outputFile.getAbsolutePath());
++ cmd.add(String.valueOf(duration));
+ if (interval != null) {
+ cmd.add("-i");
+ cmd.add(interval.toString());
+@@ -211,24 +244,25 @@ public class ProfileServlet extends HttpServlet {
+ cmd.add("-j");
+ cmd.add(jstackDepth.toString());
+ }
+- if (bufsize != null) {
+- cmd.add("-b");
+- cmd.add(bufsize.toString());
+- }
+ if (thread) {
+ cmd.add("-t");
+ }
+ if (simple) {
+ cmd.add("-s");
+ }
+- if (width != null) {
+- cmd.add("--width");
+- cmd.add(width.toString());
++ if (signature) {
++ cmd.add("-g");
+ }
+- if (height != null) {
+- cmd.add("--height");
+- cmd.add(height.toString());
++ if (annotate) {
++ cmd.add("-a");
+ }
++ if (prependLib) {
++ cmd.add("-l");
++ }
++ cmd.add("-o");
++ cmd.add(output.name().toLowerCase());
++ cmd.add("-f");
++ cmd.add(outputFile.getAbsolutePath());
+ if (minwidth != null) {
+ cmd.add("--minwidth");
+ cmd.add(minwidth.toString());
+@@ -236,6 +270,7 @@ public class ProfileServlet extends HttpServlet {
+ if (reverse) {
+ cmd.add("--reverse");
+ }
++
+ cmd.add(pid.toString());
+ process = ProcessUtils.runCmdAsync(cmd);
+
+@@ -246,7 +281,10 @@ public class ProfileServlet extends HttpServlet {
+ resp.getWriter()
+ .write("Started [" + event.getInternalName()
+ + "] profiling. This page will automatically redirect to " + relativeUrl + " after "
+- + duration + " seconds.\n\nCommand:\n" + Joiner.on(" ").join(cmd));
++ + duration + " seconds. "
++ + "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async "
++ + "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler."
++ + "\n\nCommand:\n" + Joiner.on(" ").join(cmd));
+
+ // to avoid auto-refresh by ProfileOutputServlet, refreshDelay can be specified
+ // via url param
+@@ -264,8 +302,9 @@ public class ProfileServlet extends HttpServlet {
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ resp.getWriter()
+ .write("Unable to acquire lock. Another instance of profiler might be running.");
+- LOG.warn("Unable to acquire lock in " + lockTimeoutSecs
+- + " seconds. Another instance of profiler might be running.");
++ LOG.warn(
++ "Unable to acquire lock in {} seconds. Another instance of profiler might be running.",
++ lockTimeoutSecs);
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Interrupted while acquiring profile lock.", e);
+@@ -278,9 +317,9 @@ public class ProfileServlet extends HttpServlet {
+ }
+ }
+
+- private Integer getInteger(final HttpServletRequest req, final String param,
++ private static Integer getInteger(final HttpServletRequest req, final String param,
+ final Integer defaultValue) {
+- final String value = req.getParameter(param);
++ String value = req.getParameter(param);
+ if (value != null) {
+ try {
+ return Integer.valueOf(value);
+@@ -291,8 +330,8 @@ public class ProfileServlet extends HttpServlet {
+ return defaultValue;
+ }
+
+- private Long getLong(final HttpServletRequest req, final String param) {
+- final String value = req.getParameter(param);
++ private static Long getLong(final HttpServletRequest req, final String param) {
++ String value = req.getParameter(param);
+ if (value != null) {
+ try {
+ return Long.valueOf(value);
+@@ -303,8 +342,8 @@ public class ProfileServlet extends HttpServlet {
+ return null;
+ }
+
+- private Double getMinWidth(final HttpServletRequest req) {
+- final String value = req.getParameter("minwidth");
++ private static Double getMinWidth(final HttpServletRequest req) {
++ String value = req.getParameter("minwidth");
+ if (value != null) {
+ try {
+ return Double.valueOf(value);
+@@ -315,8 +354,8 @@ public class ProfileServlet extends HttpServlet {
+ return null;
+ }
+
+- private Event getEvent(final HttpServletRequest req) {
+- final String eventArg = req.getParameter("event");
++ private static Event getEvent(final HttpServletRequest req) {
++ String eventArg = req.getParameter("event");
+ if (eventArg != null) {
+ Event event = Event.fromInternalName(eventArg);
+ return event == null ? Event.CPU : event;
+@@ -324,16 +363,16 @@ public class ProfileServlet extends HttpServlet {
+ return Event.CPU;
+ }
+
+- private Output getOutput(final HttpServletRequest req) {
+- final String outputArg = req.getParameter("output");
++ private static Output getOutput(final HttpServletRequest req) {
++ String outputArg = req.getParameter("output");
+ if (req.getParameter("output") != null) {
+ try {
+ return Output.valueOf(outputArg.trim().toUpperCase());
+ } catch (IllegalArgumentException e) {
+- return Output.SVG;
++ return Output.FLAMEGRAPH;
+ }
+ }
+- return Output.SVG;
++ return Output.FLAMEGRAPH;
+ }
+
+ static void setResponseHeader(final HttpServletResponse response) {
+@@ -365,8 +404,7 @@ public class ProfileServlet extends HttpServlet {
+ .write("The profiler servlet was disabled at startup.\n\n"
+ + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n"
+ + "environment is properly configured. For more information please see\n"
+- + "http://hbase.apache.org/book.html#profiler\n");
+- return;
++ + "https://hbase.apache.org/book.html#profiler\n");
+ }
+
+ }
diff --git a/upload_new_async-profiler_version.sh b/upload_new_async-profiler_version.sh
new file mode 100755
index 000000000..23530a49c
--- /dev/null
+++ b/upload_new_async-profiler_version.sh
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+
+set -e
+
+VERSION=${1:?"Missing version number argument (arg 1)"}
+NEXUS_USER=${2:?"Missing Nexus username argument (arg 2)"}
+
+ARCHITECTURES=(
+ x64
+ arm64
+)
+
+read -r -s -p "Nexus Password: " NEXUS_PASSWORD
+echo ""
+
+# async-profiler does not currently publish signatures or SBOMs (as of
+# 2024-01-30, when the latest version was v3.0)
+
+# https://stackoverflow.com/questions/4632028/how-to-create-a-temporary-directory
+# Find the directory name of the script
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+# the temp directory used, within $DIR
+WORK_DIR=$(mktemp -d -p "$DIR")
+
+# check if tmp dir was created
+if [[ ! "$WORK_DIR" || ! -d "$WORK_DIR" ]]; then
+ echo "Could not create temp dir"
+ exit 1
+fi
+
+# deletes the temp directory
+function cleanup {
+ rm -rf "$WORK_DIR"
+}
+
+# register the cleanup function to be called on the EXIT signal
+trap cleanup EXIT
+
+cd "$WORK_DIR" || exit
+
+for arch in "${ARCHITECTURES[@]}"; do
+ file=async-profiler-$VERSION-linux-$arch.tar.gz
+
+ echo "Downloading $file from github.com"
+ curl --fail -LOs "https://github.com/async-profiler/async-profiler/releases/download/v$VERSION/$file"
+
+ echo "Uploading $file to Nexus"
+ curl --fail -u "$NEXUS_USER:$NEXUS_PASSWORD" \
+ --upload-file "$file" \
+ 'https://repo.stackable.tech/repository/packages/async-profiler/'
+done
+
+echo "Successfully uploaded new version $VERSION to Nexus"
+echo "https://repo.stackable.tech/service/rest/repository/browse/packages/async-profiler/"