From b818c6fd84e04c5a3aa13a789bceb805c29c63aa Mon Sep 17 00:00:00 2001
From: liangbowen
Date: Mon, 3 Apr 2023 11:45:10 +0800
Subject: [PATCH 001/404] [KYUUBI #4647] Bump Maven from 3.8.7 to 3.9.1 and
Mvnd from 0.9.0 to 1.0-m6
### _Why are the changes needed?_
- bump Maven from 3.8.7 to 3.9.1; 3.9.1 fixes the performance issue [MNG-7677](https://issues.apache.org/jira/browse/MNG-7677) present in 3.9.0, release notes: https://maven.apache.org/docs/3.9.1/release-notes.html
- bump Mvnd from 0.9.0 to 1.0-m6 (with embedded Maven 3.9.1), release notes: https://github.com/apache/maven-mvnd/releases/tag/1.0-m6
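For context, mvnd 1.0-m6 release artifacts encode the bundled Maven line in their file names, which is why the `build/mvnd` change below derives a short Maven version. A minimal Scala sketch of that derivation, mirroring the awk pipeline in the diff (the `linux-amd64` suffix is only an illustrative OS/arch value):

```scala
object MvndArtifactName {
  def main(args: Array[String]): Unit = {
    val mvnVersion = "3.9.1"
    // same result as: echo "$MVN_VERSION" | awk -F . '{print $1$2}'
    val shortVersion = mvnVersion.split('.').take(2).mkString // "39"
    // yields e.g. maven-mvnd-1.0-m6-m39-linux-amd64.tar.gz
    println(s"maven-mvnd-1.0-m6-m$shortVersion-linux-amd64.tar.gz")
  }
}
```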
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4647 from bowenliang123/maven-3.9.1.
Closes #4647
f803394df [liangbowen] remove property
efd199f7a [liangbowen] fix
87e18d70a [Bowen Liang] Update build/mvnd
10f4a25ff [liangbowen] bump Maven from 3.8.7 to 3.9.1, and Mvnd from 0.9.0 to 1.0-m6 (with embedded maven 3.9.1)
Lead-authored-by: liangbowen
Co-authored-by: Bowen Liang
Signed-off-by: liangbowen
---
build/mvnd | 9 +++++----
pom.xml | 4 ++--
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/build/mvnd b/build/mvnd
index 81a6f5c20a5..f0c72332fa7 100755
--- a/build/mvnd
+++ b/build/mvnd
@@ -94,8 +94,9 @@ function get_os_arch() {
# Determine the Mvnd version from the root pom.xml file and
# install mvnd under the build/ folder if needed.
function install_mvnd() {
- local MVND_VERSION=$(grep "<mvnd.version>" "${_DIR}/../pom.xml" | head -n1 | awk -F '[<>]' '{print $3}')
local MVN_VERSION=$(grep "<maven.version>" "${_DIR}/../pom.xml" | head -n1 | awk -F '[<>]' '{print $3}')
+ local MVND_VERSION=$(grep "<mvnd.version>" "${_DIR}/../pom.xml" | head -n1 | awk -F '[<>]' '{print $3}')
+ local MVND_MVN_SHORT_VERSION=$(echo "$MVN_VERSION" | awk -F . '{print $1$2}')
MVND_BIN="$(command -v mvnd)"
if [ "$MVND_BIN" ]; then
local MVND_DETECTED_VERSION="$(mvnd -v 2>&1 | grep '(mvnd)' | awk '{print $5}')"
@@ -111,10 +112,10 @@ function install_mvnd() {
install_app \
"${APACHE_MIRROR}/maven/mvnd/${MVND_VERSION}" \
- "maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}.tar.gz" \
- "maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd"
+ "maven-mvnd-${MVND_VERSION}-m${MVND_MVN_SHORT_VERSION}-${OS_TYPE}-${ARCH}.tar.gz" \
+ "maven-mvnd-${MVND_VERSION}-m${MVND_MVN_SHORT_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd"
- MVND_BIN="${_DIR}/maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd"
+ MVND_BIN="${_DIR}/maven-mvnd-${MVND_VERSION}-m${MVND_MVN_SHORT_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd"
else
if [ "$(version $MVN_DETECTED_VERSION)" -ne "$(version $MVN_VERSION)" ]; then
echo "Mvnd $MVND_DETECTED_VERSION embedded maven version $MVN_DETECTED_VERSION is not equivalent to $MVN_VERSION required in pom."
diff --git a/pom.xml b/pom.xml
index b2b0341e2e9..09ee14c08b4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -109,8 +109,8 @@
<java.version>1.8</java.version>
- <maven.version>3.8.7</maven.version>
- <mvnd.version>0.9.0</mvnd.version>
+ <maven.version>3.9.1</maven.version>
+ <mvnd.version>1.0-m6</mvnd.version>
<maven.compiler.source>${java.version}</maven.compiler.source>
<maven.compiler.target>${java.version}</maven.compiler.target>
<scala.version>2.12.17</scala.version>
From a947dcb792b17f3fa40f03ec03397b6670b0b32a Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Mon, 3 Apr 2023 18:51:27 +0800
Subject: [PATCH 002/404] [KYUUBI #4655] [DOCS] Enrich docs for Kyuubi Hive
JDBC driver
### _Why are the changes needed?_
Update the outdated wording for the Kyuubi Hive JDBC driver, and supply more details about Kerberos authentication.
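As a rough illustration of the Kerberos details now documented, connecting through the Kyuubi Hive JDBC driver to a kerberized server looks like the sketch below; the host, port, and principal are placeholders, not values from this patch:

```scala
import java.sql.DriverManager

object KerberosJdbcSketch {
  def main(args: Array[String]): Unit = {
    // assumes a valid TGT in the local ticket cache and the Kyuubi Hive JDBC
    // driver on the classpath; principal is the server's Kerberos principal
    val url = "jdbc:hive2://kyuubi.example.com:10009/default;" +
      "principal=kyuubi/kyuubi.example.com@EXAMPLE.COM"
    val conn = DriverManager.getConnection(url)
    try println(conn.getMetaData.getDatabaseProductName)
    finally conn.close()
  }
}
```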
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [x] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4655 from pan3793/docs-v2.
Closes #4655
9d2cb4875 [Cheng Pan] Update docs/quick_start/quick_start_with_jdbc.md
00af58e27 [Cheng Pan] address comments
48bf21664 [Cheng Pan] Update docs/quick_start/quick_start_with_jupyter.md
054e2bea0 [Cheng Pan] nit
a0a80b818 [Cheng Pan] nit
41ff97de3 [Cheng Pan] [DOCS] Enrich docs for Kyuubi Hive JDBC Driver
Authored-by: Cheng Pan
Signed-off-by: Cheng Pan
---
README.md | 2 +-
docs/appendix/terminology.md | 4 +-
docs/client/jdbc/hive_jdbc.md | 14 +--
docs/client/jdbc/kyuubi_jdbc.rst | 115 ++++++++++++++-----
docs/extensions/server/authentication.rst | 4 +-
docs/quick_start/quick_start_with_helm.md | 2 +-
docs/quick_start/quick_start_with_jdbc.md | 114 +++++++++---------
docs/quick_start/quick_start_with_jupyter.md | 2 +-
8 files changed, 159 insertions(+), 98 deletions(-)
diff --git a/README.md b/README.md
index e54f6fac00d..43efc4c66d3 100644
--- a/README.md
+++ b/README.md
@@ -84,7 +84,7 @@ HiveServer2 can identify and authenticate a caller, and then if the caller also
Kyuubi extends the use of STS in a multi-tenant model based on a unified interface and relies on the concept of multi-tenancy to interact with cluster managers to finally gain the ability of resources sharing/isolation and data security. The loosely coupled architecture of the Kyuubi server and engine dramatically improves the client concurrency and service stability of the service itself.
-#### DataLake/LakeHouse Support
+#### DataLake/Lakehouse Support
The vision of Kyuubi is to unify the portal and become an easy-to-use data lake management platform. Different kinds of workloads, such as ETL processing and BI analytics, can be supported by one platform, using one copy of data, with one SQL interface.
diff --git a/docs/appendix/terminology.md b/docs/appendix/terminology.md
index b81fa25fe87..b349d77c7bd 100644
--- a/docs/appendix/terminology.md
+++ b/docs/appendix/terminology.md
@@ -129,9 +129,9 @@ As an enterprise service, SLA commitment is essential. Deploying Kyuubi in High
| string | 1.3.0 |
| kyuubi.engine.event.loggers | SPARK | A comma-separated list of engine history loggers, where engine/session/operation etc events go.<ul><li>SPARK: the events will be written to the Spark listener bus.</li><li>JSON: the events will be written to the location of kyuubi.engine.event.json.log.path</li><li>JDBC: to be done</li><li>CUSTOM: User-defined event handlers.</li></ul> Note that: Kyuubi supports custom event handlers with the Java SPI. To register a custom event handler, the user needs to implement a subclass of `org.apache.kyuubi.events.handler.CustomEventHandlerProvider` which has a zero-arg constructor. | seq | 1.3.0 |
-| kyuubi.engine.flink.extra.classpath | <undefined> | The extra classpath for the Flink SQL engine, for configuring the location of hadoop client jars, etc | string | 1.6.0 |
-| kyuubi.engine.flink.java.options | <undefined> | The extra Java options for the Flink SQL engine | string | 1.6.0 |
-| kyuubi.engine.flink.memory | 1g | The heap memory for the Flink SQL engine | string | 1.6.0 |
+| kyuubi.engine.flink.application.jars | <undefined> | A comma-separated list of the local jars to be shipped with the job to the cluster. For example, SQL UDF jars. Only effective in yarn application mode. | string | 1.8.0 |
+| kyuubi.engine.flink.extra.classpath | <undefined> | The extra classpath for the Flink SQL engine, for configuring the location of hadoop client jars, etc. Only effective in yarn session mode. | string | 1.6.0 |
+| kyuubi.engine.flink.java.options | <undefined> | The extra Java options for the Flink SQL engine. Only effective in yarn session mode. | string | 1.6.0 |
+| kyuubi.engine.flink.memory | 1g | The heap memory for the Flink SQL engine. Only effective in yarn session mode. | string | 1.6.0 |
| kyuubi.engine.hive.event.loggers | JSON | A comma-separated list of engine history loggers, where engine/session/operation etc events go.<ul><li>JSON: the events will be written to the location of kyuubi.engine.event.json.log.path</li><li>JDBC: to be done</li><li>CUSTOM: to be done.</li></ul> | seq | 1.7.0 |
| kyuubi.engine.hive.extra.classpath | <undefined> | The extra classpath for the Hive query engine, for configuring location of the hadoop client jars and etc. | string | 1.6.0 |
| kyuubi.engine.hive.java.options | <undefined> | The extra Java options for the Hive query engine | string | 1.6.0 |
diff --git a/externals/kyuubi-flink-sql-engine/pom.xml b/externals/kyuubi-flink-sql-engine/pom.xml
index f3633b904f5..0e499f9785b 100644
--- a/externals/kyuubi-flink-sql-engine/pom.xml
+++ b/externals/kyuubi-flink-sql-engine/pom.xml
@@ -126,11 +126,49 @@
      <version>${project.version}</version>
      <scope>test</scope>
    </dependency>
+
+    <dependency>
+      <groupId>org.apache.kyuubi</groupId>
+      <artifactId>kyuubi-zookeeper_${scala.binary.version}</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-test-utils</artifactId>
      <scope>test</scope>
    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client-minicluster</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk15on</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcpkix-jdk15on</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>jakarta.activation</groupId>
+      <artifactId>jakarta.activation-api</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>jakarta.xml.bind</groupId>
+      <artifactId>jakarta.xml.bind-api</artifactId>
+      <scope>test</scope>
+    </dependency>
diff --git a/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/flink/client/deployment/application/executors/EmbeddedExecutorFactory.java b/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/flink/client/deployment/application/executors/EmbeddedExecutorFactory.java
new file mode 100644
index 00000000000..69d69a55cfd
--- /dev/null
+++ b/externals/kyuubi-flink-sql-engine/src/main/java/org/apache/flink/client/deployment/application/executors/EmbeddedExecutorFactory.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.client.deployment.application.executors;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+import static org.apache.flink.util.Preconditions.checkState;
+
+import java.util.Collection;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.JobID;
+import org.apache.flink.api.common.time.Time;
+import org.apache.flink.client.cli.ClientOptions;
+import org.apache.flink.client.deployment.application.EmbeddedJobClient;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.configuration.DeploymentOptions;
+import org.apache.flink.core.execution.PipelineExecutor;
+import org.apache.flink.core.execution.PipelineExecutorFactory;
+import org.apache.flink.runtime.dispatcher.DispatcherGateway;
+import org.apache.flink.util.concurrent.ScheduledExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Copied from Apache Flink to expose the DispatcherGateway for Kyuubi statements. */
+@Internal
+public class EmbeddedExecutorFactory implements PipelineExecutorFactory {
+
+ private static Collection bootstrapJobIds;
+
+ private static Collection submittedJobIds;
+
+ private static DispatcherGateway dispatcherGateway;
+
+ private static ScheduledExecutor retryExecutor;
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(EmbeddedExecutorFactory.class);
+
+ public EmbeddedExecutorFactory() {
+ LOGGER.debug(
+ "{} loaded in thread {} with classloader {}.",
+ this.getClass().getCanonicalName(),
+ Thread.currentThread().getName(),
+ this.getClass().getClassLoader().toString());
+ }
+
+ /**
+ * Creates an {@link EmbeddedExecutorFactory}.
+ *
+ * @param submittedJobIds a list that is going to be filled with the job ids of the new jobs that
+ * will be submitted. This is essentially used to return the submitted job ids to the caller.
+ * @param dispatcherGateway the dispatcher of the cluster which is going to be used to submit
+ * jobs.
+ */
+ public EmbeddedExecutorFactory(
+ final Collection submittedJobIds,
+ final DispatcherGateway dispatcherGateway,
+ final ScheduledExecutor retryExecutor) {
+ // there should be only one instance of EmbeddedExecutorFactory
+ LOGGER.debug(
+ "{} initiated in thread {} with classloader {}.",
+ this.getClass().getCanonicalName(),
+ Thread.currentThread().getName(),
+ this.getClass().getClassLoader().toString());
+ checkState(EmbeddedExecutorFactory.submittedJobIds == null);
+ checkState(EmbeddedExecutorFactory.dispatcherGateway == null);
+ checkState(EmbeddedExecutorFactory.retryExecutor == null);
+    // submittedJobIds would always be of size 1, because we create a new list
+    // to avoid concurrent access issues
+ EmbeddedExecutorFactory.submittedJobIds =
+ new ConcurrentLinkedQueue<>(checkNotNull(submittedJobIds));
+ EmbeddedExecutorFactory.bootstrapJobIds = submittedJobIds;
+ EmbeddedExecutorFactory.dispatcherGateway = checkNotNull(dispatcherGateway);
+ EmbeddedExecutorFactory.retryExecutor = checkNotNull(retryExecutor);
+ }
+
+ @Override
+ public String getName() {
+ return EmbeddedExecutor.NAME;
+ }
+
+ @Override
+ public boolean isCompatibleWith(final Configuration configuration) {
+ // override Flink's implementation to allow usage in Kyuubi
+ LOGGER.debug("matching execution target: {}", configuration.get(DeploymentOptions.TARGET));
+ return configuration.get(DeploymentOptions.TARGET).equalsIgnoreCase("yarn-application")
+ && configuration.toMap().getOrDefault("yarn.tags", "").toLowerCase().contains("kyuubi");
+ }
+
+ @Override
+ public PipelineExecutor getExecutor(final Configuration configuration) {
+ checkNotNull(configuration);
+ Collection executorJobIDs;
+ if (bootstrapJobIds.size() > 0) {
+ LOGGER.info("Submitting new Kyuubi job. Job already submitted: {}.", submittedJobIds.size());
+ executorJobIDs = submittedJobIds;
+ } else {
+ LOGGER.info("Bootstrapping Flink SQL engine.");
+ executorJobIDs = bootstrapJobIds;
+ }
+ return new EmbeddedExecutor(
+ executorJobIDs,
+ dispatcherGateway,
+ (jobId, userCodeClassloader) -> {
+ final Time timeout =
+ Time.milliseconds(configuration.get(ClientOptions.CLIENT_TIMEOUT).toMillis());
+ return new EmbeddedJobClient(
+ jobId, dispatcherGateway, retryExecutor, timeout, userCodeClassloader);
+ });
+ }
+}
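A minimal sketch of the matching rule implemented above, assuming the Flink client libraries and this factory are on the classpath: the factory only engages for yarn-application deployments whose YARN tags mention Kyuubi.

```scala
import org.apache.flink.client.deployment.application.executors.EmbeddedExecutorFactory
import org.apache.flink.configuration.{Configuration, DeploymentOptions}

object ExecutorMatchSketch {
  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    conf.set(DeploymentOptions.TARGET, "yarn-application")
    conf.setString("yarn.tags", "KYUUBI,demo-engine-ref-id") // tag value is illustrative
    // the zero-arg constructor is the one Flink's SPI loader uses
    assert(new EmbeddedExecutorFactory().isCompatibleWith(conf))
  }
}
```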
diff --git a/externals/kyuubi-flink-sql-engine/src/main/resources/META-INF/services/org.apache.flink.core.execution.PipelineExecutorFactory b/externals/kyuubi-flink-sql-engine/src/main/resources/META-INF/services/org.apache.flink.core.execution.PipelineExecutorFactory
new file mode 100644
index 00000000000..c394c07a7ba
--- /dev/null
+++ b/externals/kyuubi-flink-sql-engine/src/main/resources/META-INF/services/org.apache.flink.core.execution.PipelineExecutorFactory
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.flink.client.deployment.application.executors.EmbeddedExecutorFactory
\ No newline at end of file
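The services file above registers the factory with the standard Java SPI, which is how Flink's executor loading discovers it at runtime. A sketch of that discovery, assuming the engine jar is on the classpath:

```scala
import java.util.ServiceLoader
import org.apache.flink.core.execution.PipelineExecutorFactory

object SpiDiscoverySketch {
  def main(args: Array[String]): Unit = {
    // lists every PipelineExecutorFactory registered via META-INF/services,
    // including the EmbeddedExecutorFactory added by this patch
    val factories = ServiceLoader.load(classOf[PipelineExecutorFactory])
    factories.forEach(f => println(f.getName))
  }
}
```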
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala
index 06fdc65ae61..42061a36959 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala
@@ -28,6 +28,7 @@ import scala.collection.mutable.ListBuffer
import org.apache.flink.client.cli.{DefaultCLI, GenericCLI}
import org.apache.flink.configuration.{Configuration, DeploymentOptions, GlobalConfiguration}
+import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.client.SqlClientException
import org.apache.flink.table.client.gateway.context.DefaultContext
import org.apache.flink.util.JarUtils
@@ -71,9 +72,12 @@ object FlinkSQLEngine extends Logging {
def main(args: Array[String]): Unit = {
SignalRegister.registerLogger(logger)
+ info(s"Flink SQL engine classpath: ${System.getProperty("java.class.path")}")
+
FlinkEngineUtils.checkFlinkVersion()
try {
+ kyuubiConf.loadFileDefaults()
Utils.fromCommandLineArgs(args, kyuubiConf)
val flinkConfDir = sys.env.getOrElse(
"FLINK_CONF_DIR", {
@@ -100,6 +104,11 @@ object FlinkSQLEngine extends Logging {
val appName = s"kyuubi_${user}_flink_${Instant.now}"
flinkConf.setString("yarn.application.name", appName)
}
+ if (flinkConf.containsKey("high-availability.cluster-id")) {
+ flinkConf.setString(
+ "yarn.application.id",
+ flinkConf.toMap.get("high-availability.cluster-id"))
+ }
case "kubernetes-application" =>
if (!flinkConf.containsKey("kubernetes.cluster-id")) {
val appName = s"kyuubi-${user}-flink-${Instant.now}"
@@ -122,7 +131,11 @@ object FlinkSQLEngine extends Logging {
kyuubiConf.setIfMissing(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0)
startEngine(engineContext)
- info("started engine...")
+ info("Flink engine started")
+
+ if ("yarn-application".equalsIgnoreCase(executionTarget)) {
+ bootstrapFlinkApplicationExecutor(flinkConf)
+ }
// blocking main thread
countDownLatch.await()
@@ -146,6 +159,15 @@ object FlinkSQLEngine extends Logging {
}
}
+ private def bootstrapFlinkApplicationExecutor(flinkConf: Configuration) = {
+ // trigger an execution to initiate EmbeddedExecutor
+ info("Running initial Flink SQL in application mode.")
+ val tableEnv = TableEnvironment.create(flinkConf)
+ val res = tableEnv.executeSql("select 'kyuubi'")
+ res.await()
+ info("Initial Flink SQL finished.")
+ }
+
private def discoverDependencies(
jars: Seq[URL],
libraries: Seq[URL]): List[URL] = {
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala
index de104150fe6..10ad5bf6d3a 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala
@@ -28,7 +28,7 @@ import org.apache.flink.table.api.ResultKind
import org.apache.flink.table.client.gateway.TypedResult
import org.apache.flink.table.data.{GenericArrayData, GenericMapData, RowData}
import org.apache.flink.table.data.binary.{BinaryArrayData, BinaryMapData}
-import org.apache.flink.table.operations.{Operation, QueryOperation}
+import org.apache.flink.table.operations.{ModifyOperation, Operation, QueryOperation}
import org.apache.flink.table.operations.command._
import org.apache.flink.table.types.DataType
import org.apache.flink.table.types.logical._
@@ -80,6 +80,7 @@ class ExecuteStatement(
val operation = executor.parseStatement(sessionId, statement)
operation match {
case queryOperation: QueryOperation => runQueryOperation(queryOperation)
+ case modifyOperation: ModifyOperation => runModifyOperation(modifyOperation)
case setOperation: SetOperation =>
resultSet = OperationUtils.runSetOperation(setOperation, executor, sessionId)
case resetOperation: ResetOperation =>
@@ -143,6 +144,12 @@ class ExecuteStatement(
}
}
+ private def runModifyOperation(operation: ModifyOperation): Unit = {
+ val result = executor.executeOperation(sessionId, operation)
+ jobId = result.getJobClient.asScala.map(_.getJobID)
+ resultSet = ResultSet.fromJobId(jobId.orNull)
+ }
+
private def runOperation(operation: Operation): Unit = {
val result = executor.executeOperation(sessionId, operation)
jobId = result.getJobClient.asScala.map(_.getJobID)
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSet.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSet.scala
index 13673381258..09c40198856 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSet.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSet.scala
@@ -22,7 +22,8 @@ import java.util
import scala.collection.JavaConverters._
import com.google.common.collect.Iterators
-import org.apache.flink.table.api.{ResultKind, TableResult}
+import org.apache.flink.api.common.JobID
+import org.apache.flink.table.api.{DataTypes, ResultKind, TableResult}
import org.apache.flink.table.catalog.Column
import org.apache.flink.types.Row
@@ -68,6 +69,20 @@ object ResultSet {
.build
}
+ def fromJobId(jobID: JobID): ResultSet = {
+ val data: Array[Row] = if (jobID != null) {
+ Array(Row.of(jobID.toString))
+ } else {
+ // should not happen
+ Array(Row.of("(Empty Job ID)"))
+ }
+ builder
+ .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
+ .columns(Column.physical("result", DataTypes.STRING()))
+ .data(data)
+      .build
+ }
+
def builder: Builder = new ResultSet.Builder
class Builder {
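For illustration, a sketch of the new helper, assuming Flink's `JobID` is available (`new JobID()` yields a random id); the updated tests later in this series assert the 32-character hex result:

```scala
import org.apache.flink.api.common.JobID
import org.apache.kyuubi.engine.flink.result.ResultSet

object FromJobIdSketch {
  def main(args: Array[String]): Unit = {
    // one VARCHAR column named "result" holding the job id string
    val rs = ResultSet.fromJobId(new JobID())
    println(rs)
  }
}
```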
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithDiscoveryFlinkSQLEngine.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithDiscoveryFlinkSQLEngine.scala
new file mode 100644
index 00000000000..aebcce6c589
--- /dev/null
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithDiscoveryFlinkSQLEngine.scala
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.engine.flink
+
+import java.util.UUID
+
+import org.apache.kyuubi.config.KyuubiConf.{ENGINE_SHARE_LEVEL, ENGINE_TYPE}
+import org.apache.kyuubi.engine.ShareLevel
+import org.apache.kyuubi.ha.HighAvailabilityConf.{HA_ENGINE_REF_ID, HA_NAMESPACE}
+import org.apache.kyuubi.ha.client.{DiscoveryClient, DiscoveryClientProvider}
+
+trait WithDiscoveryFlinkSQLEngine extends WithFlinkSQLEngineOnYarn {
+
+ override protected def engineRefId: String = UUID.randomUUID().toString
+
+ def namespace: String = "/kyuubi/flink-yarn-application-test"
+
+ def shareLevel: String = ShareLevel.USER.toString
+
+ def engineType: String = "flink"
+
+ override def withKyuubiConf: Map[String, String] = {
+ Map(
+ HA_NAMESPACE.key -> namespace,
+ HA_ENGINE_REF_ID.key -> engineRefId,
+ ENGINE_TYPE.key -> "FLINK_SQL",
+ ENGINE_SHARE_LEVEL.key -> shareLevel)
+ }
+
+ def withDiscoveryClient(f: DiscoveryClient => Unit): Unit = {
+ DiscoveryClientProvider.withDiscoveryClient(conf)(f)
+ }
+
+ def getFlinkEngineServiceUrl: String = {
+ var hostPort: Option[(String, Int)] = None
+ var retries = 0
+ while (hostPort.isEmpty && retries < 5) {
+ withDiscoveryClient(client => hostPort = client.getServerHost(namespace))
+ retries += 1
+ Thread.sleep(1000L)
+ }
+ if (hostPort.isEmpty) {
+ throw new RuntimeException("Time out retrieving Flink engine service url.")
+ }
+ // delay the access to thrift service because the thrift service
+ // may not be ready although it's registered
+ Thread.sleep(3000L)
+ s"jdbc:hive2://${hostPort.get._1}:${hostPort.get._2}"
+ }
+}
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngine.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala
similarity index 79%
rename from externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngine.scala
rename to externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala
index fbfb8df29ac..c8435f9c54c 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngine.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala
@@ -24,32 +24,20 @@ import org.apache.flink.configuration.{Configuration, RestOptions}
import org.apache.flink.runtime.minicluster.{MiniCluster, MiniClusterConfiguration}
import org.apache.flink.table.client.gateway.context.DefaultContext
-import org.apache.kyuubi.{KyuubiFunSuite, Utils}
+import org.apache.kyuubi.KyuubiFunSuite
import org.apache.kyuubi.config.KyuubiConf
-import org.apache.kyuubi.engine.flink.util.TestUserClassLoaderJar
-trait WithFlinkSQLEngine extends KyuubiFunSuite {
+trait WithFlinkSQLEngineLocal extends KyuubiFunSuite with WithFlinkTestResources {
protected val flinkConfig = new Configuration()
protected var miniCluster: MiniCluster = _
protected var engine: FlinkSQLEngine = _
// conf will not be loaded until the Flink engine starts
def withKyuubiConf: Map[String, String]
- val kyuubiConf: KyuubiConf = FlinkSQLEngine.kyuubiConf
+ protected val kyuubiConf: KyuubiConf = FlinkSQLEngine.kyuubiConf
protected var connectionUrl: String = _
- protected val GENERATED_UDF_CLASS: String = "LowerUDF"
-
- protected val GENERATED_UDF_CODE: String =
- s"""
- public class $GENERATED_UDF_CLASS extends org.apache.flink.table.functions.ScalarFunction {
- public String eval(String str) {
- return str.toLowerCase();
- }
- }
- """
-
override def beforeAll(): Unit = {
startMiniCluster()
startFlinkEngine()
@@ -67,11 +55,6 @@ trait WithFlinkSQLEngine extends KyuubiFunSuite {
System.setProperty(k, v)
kyuubiConf.set(k, v)
}
- val udfJar = TestUserClassLoaderJar.createJarFile(
- Utils.createTempDir("test-jar").toFile,
- "test-classloader-udf.jar",
- GENERATED_UDF_CLASS,
- GENERATED_UDF_CODE)
val engineContext = new DefaultContext(
List(udfJar.toURI.toURL).asJava,
flinkConfig,
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineOnYarn.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineOnYarn.scala
new file mode 100644
index 00000000000..3847087b3fc
--- /dev/null
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineOnYarn.scala
@@ -0,0 +1,265 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.engine.flink
+
+import java.io.{File, FilenameFilter, FileWriter}
+import java.lang.ProcessBuilder.Redirect
+import java.net.URI
+import java.nio.file.{Files, Paths}
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable.{ArrayBuffer, ListBuffer}
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hdfs.MiniDFSCluster
+import org.apache.hadoop.yarn.conf.YarnConfiguration
+import org.apache.hadoop.yarn.server.MiniYARNCluster
+
+import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiFunSuite, SCALA_COMPILE_VERSION, Utils}
+import org.apache.kyuubi.config.KyuubiConf
+import org.apache.kyuubi.config.KyuubiConf.{ENGINE_FLINK_APPLICATION_JARS, KYUUBI_HOME}
+import org.apache.kyuubi.ha.HighAvailabilityConf.HA_ADDRESSES
+import org.apache.kyuubi.zookeeper.EmbeddedZookeeper
+import org.apache.kyuubi.zookeeper.ZookeeperConf.{ZK_CLIENT_PORT, ZK_CLIENT_PORT_ADDRESS}
+
+trait WithFlinkSQLEngineOnYarn extends KyuubiFunSuite with WithFlinkTestResources {
+
+ protected def engineRefId: String
+
+ protected val conf: KyuubiConf = new KyuubiConf(false)
+
+ private var hdfsCluster: MiniDFSCluster = _
+
+ private var yarnCluster: MiniYARNCluster = _
+
+ private var zkServer: EmbeddedZookeeper = _
+
+ def withKyuubiConf: Map[String, String]
+
+ private val yarnConf: YarnConfiguration = {
+ val yarnConfig = new YarnConfiguration()
+
+ // configurations copied from org.apache.flink.yarn.YarnTestBase
+ yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 32)
+ yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 4096)
+
+ yarnConfig.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true)
+ yarnConfig.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2)
+ yarnConfig.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 2)
+ yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 4)
+ yarnConfig.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 3600)
+ yarnConfig.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false)
+ // memory is overwritten in the MiniYARNCluster.
+ // so we have to change the number of cores for testing.
+ yarnConfig.setInt(YarnConfiguration.NM_VCORES, 666)
+ yarnConfig.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 99.0f)
+ yarnConfig.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 1000)
+ yarnConfig.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, 5000)
+
+ // capacity-scheduler.xml is missing in hadoop-client-minicluster so this is a workaround
+ yarnConfig.set("yarn.scheduler.capacity.root.queues", "default,four_cores_queue")
+
+ yarnConfig.setInt("yarn.scheduler.capacity.root.default.capacity", 100)
+ yarnConfig.setFloat("yarn.scheduler.capacity.root.default.user-limit-factor", 1)
+ yarnConfig.setInt("yarn.scheduler.capacity.root.default.maximum-capacity", 100)
+ yarnConfig.set("yarn.scheduler.capacity.root.default.state", "RUNNING")
+ yarnConfig.set("yarn.scheduler.capacity.root.default.acl_submit_applications", "*")
+ yarnConfig.set("yarn.scheduler.capacity.root.default.acl_administer_queue", "*")
+
+ yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-capacity", 100)
+ yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-applications", 10)
+ yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-allocation-vcores", 4)
+ yarnConfig.setFloat("yarn.scheduler.capacity.root.four_cores_queue.user-limit-factor", 1)
+ yarnConfig.set("yarn.scheduler.capacity.root.four_cores_queue.acl_submit_applications", "*")
+ yarnConfig.set("yarn.scheduler.capacity.root.four_cores_queue.acl_administer_queue", "*")
+
+ yarnConfig.setInt("yarn.scheduler.capacity.node-locality-delay", -1)
+ // Set bind host to localhost to avoid java.net.BindException
+ yarnConfig.set(YarnConfiguration.RM_BIND_HOST, "localhost")
+ yarnConfig.set(YarnConfiguration.NM_BIND_HOST, "localhost")
+
+ yarnConfig
+ }
+
+ override def beforeAll(): Unit = {
+ zkServer = new EmbeddedZookeeper()
+ conf.set(ZK_CLIENT_PORT, 0).set(ZK_CLIENT_PORT_ADDRESS, "localhost")
+ zkServer.initialize(conf)
+ zkServer.start()
+ conf.set(HA_ADDRESSES, zkServer.getConnectString)
+
+ hdfsCluster = new MiniDFSCluster.Builder(new Configuration)
+ .numDataNodes(1)
+ .checkDataNodeAddrConfig(true)
+ .checkDataNodeHostConfig(true)
+ .build()
+
+ val hdfsServiceUrl = s"hdfs://localhost:${hdfsCluster.getNameNodePort}"
+ yarnConf.set("fs.defaultFS", hdfsServiceUrl)
+ yarnConf.addResource(hdfsCluster.getConfiguration(0))
+
+ val cp = System.getProperty("java.class.path")
+ // exclude kyuubi flink engine jar that has SPI for EmbeddedExecutorFactory
+ // which can't be initialized on the client side
+ val hadoopJars = cp.split(":").filter(s => !s.contains("flink") && !s.contains("log4j"))
+ val hadoopClasspath = hadoopJars.mkString(":")
+ yarnConf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, hadoopClasspath)
+
+ yarnCluster = new MiniYARNCluster("flink-engine-cluster", 1, 1, 1)
+ yarnCluster.init(yarnConf)
+ yarnCluster.start()
+
+ val hadoopConfDir = Utils.createTempDir().toFile
+ val writer = new FileWriter(new File(hadoopConfDir, "core-site.xml"))
+ yarnCluster.getConfig.writeXml(writer)
+ writer.close()
+
+ val envs = scala.collection.mutable.Map[String, String]()
+ val kyuubiExternals = Utils.getCodeSourceLocation(getClass)
+ .split("externals").head
+ val flinkHome = {
+ val candidates = Paths.get(kyuubiExternals, "externals", "kyuubi-download", "target")
+ .toFile.listFiles(f => f.getName.contains("flink"))
+ if (candidates == null) None else candidates.map(_.toPath).headOption
+ }
+ if (flinkHome.isDefined) {
+ envs("FLINK_HOME") = flinkHome.get.toString
+ envs("FLINK_CONF_DIR") = Paths.get(flinkHome.get.toString, "conf").toString
+ }
+ envs("HADOOP_CLASSPATH") = hadoopClasspath
+ envs("HADOOP_CONF_DIR") = hadoopConfDir.getAbsolutePath
+
+ startFlinkEngine(envs.toMap)
+
+ super.beforeAll()
+ }
+
+ private def startFlinkEngine(envs: Map[String, String]): Unit = {
+ val processBuilder: ProcessBuilder = new ProcessBuilder
+ processBuilder.environment().putAll(envs.asJava)
+
+ conf.set(ENGINE_FLINK_APPLICATION_JARS, udfJar.getAbsolutePath)
+ val flinkExtraJars = extraFlinkJars(envs("FLINK_HOME"))
+ val command = new ArrayBuffer[String]()
+
+ command += s"${envs("FLINK_HOME")}${File.separator}bin/flink"
+ command += "run-application"
+ command += "-t"
+ command += "yarn-application"
+ command += s"-Dyarn.ship-files=${flinkExtraJars.mkString(";")}"
+ command += s"-Dyarn.tags=KYUUBI,$engineRefId"
+ command += "-Djobmanager.memory.process.size=1g"
+ command += "-Dtaskmanager.memory.process.size=1g"
+ command += "-Dcontainerized.master.env.FLINK_CONF_DIR=."
+ command += "-Dcontainerized.taskmanager.env.FLINK_CONF_DIR=."
+ command += s"-Dcontainerized.master.env.HADOOP_CONF_DIR=${envs("HADOOP_CONF_DIR")}"
+ command += s"-Dcontainerized.taskmanager.env.HADOOP_CONF_DIR=${envs("HADOOP_CONF_DIR")}"
+ command += "-Dexecution.target=yarn-application"
+ command += "-c"
+ command += "org.apache.kyuubi.engine.flink.FlinkSQLEngine"
+ command += s"${mainResource(envs).get}"
+
+ for ((k, v) <- withKyuubiConf) {
+ conf.set(k, v)
+ }
+
+ for ((k, v) <- conf.getAll) {
+ command += "--conf"
+ command += s"$k=$v"
+ }
+
+ processBuilder.command(command.toList.asJava)
+ processBuilder.redirectOutput(Redirect.INHERIT)
+ processBuilder.redirectError(Redirect.INHERIT)
+
+ info(s"staring flink yarn-application cluster for engine $engineRefId..")
+ val process = processBuilder.start()
+ process.waitFor()
+ info(s"flink yarn-application cluster for engine $engineRefId has started")
+ }
+
+ def extraFlinkJars(flinkHome: String): Array[String] = {
+ // locate flink sql jars
+ val flinkExtraJars = new ListBuffer[String]
+ val flinkSQLJars = Paths.get(flinkHome)
+ .resolve("opt")
+ .toFile
+ .listFiles(new FilenameFilter {
+ override def accept(dir: File, name: String): Boolean = {
+ name.toLowerCase.startsWith("flink-sql-client") ||
+ name.toLowerCase.startsWith("flink-sql-gateway")
+ }
+ }).map(f => f.getAbsolutePath).sorted
+ flinkExtraJars ++= flinkSQLJars
+
+ val userJars = conf.get(ENGINE_FLINK_APPLICATION_JARS)
+ userJars.foreach(jars => flinkExtraJars ++= jars.split(","))
+ flinkExtraJars.toArray
+ }
+
+ /**
+   * Copied from org.apache.kyuubi.engine.ProcBuilder
+ * The engine jar or other runnable jar containing the main method
+ */
+ def mainResource(env: Map[String, String]): Option[String] = {
+ // 1. get the main resource jar for user specified config first
+ val module = "kyuubi-flink-sql-engine"
+ val shortName = "flink"
+ val jarName = s"${module}_$SCALA_COMPILE_VERSION-$KYUUBI_VERSION.jar"
+ conf.getOption(s"kyuubi.session.engine.$shortName.main.resource").filter { userSpecified =>
+ // skip check exist if not local file.
+ val uri = new URI(userSpecified)
+ val schema = if (uri.getScheme != null) uri.getScheme else "file"
+ schema match {
+ case "file" => Files.exists(Paths.get(userSpecified))
+ case _ => true
+ }
+ }.orElse {
+ // 2. get the main resource jar from system build default
+ env.get(KYUUBI_HOME).toSeq
+ .flatMap { p =>
+ Seq(
+ Paths.get(p, "externals", "engines", shortName, jarName),
+ Paths.get(p, "externals", module, "target", jarName))
+ }
+ .find(Files.exists(_)).map(_.toAbsolutePath.toFile.getCanonicalPath)
+ }.orElse {
+ // 3. get the main resource from dev environment
+ val cwd = Utils.getCodeSourceLocation(getClass).split("externals")
+ assert(cwd.length > 1)
+ Option(Paths.get(cwd.head, "externals", module, "target", jarName))
+ .map(_.toAbsolutePath.toFile.getCanonicalPath)
+ }
+ }
+
+ override def afterAll(): Unit = {
+ super.afterAll()
+ if (yarnCluster != null) {
+ yarnCluster.stop()
+ yarnCluster = null
+ }
+ if (hdfsCluster != null) {
+ hdfsCluster.shutdown()
+ hdfsCluster = null
+ }
+ if (zkServer != null) {
+ zkServer.stop()
+ zkServer = null
+ }
+ }
+}
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala
new file mode 100644
index 00000000000..6a85654f0d7
--- /dev/null
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.engine.flink
+
+import org.apache.kyuubi.Utils
+import org.apache.kyuubi.engine.flink.util.TestUserClassLoaderJar
+
+trait WithFlinkTestResources {
+
+ protected val GENERATED_UDF_CLASS: String = "LowerUDF"
+
+ protected val GENERATED_UDF_CODE: String =
+ s"""
+ public class $GENERATED_UDF_CLASS extends org.apache.flink.table.functions.ScalarFunction {
+ public String eval(String str) {
+ return str.toLowerCase();
+ }
+ }
+ """
+
+ protected val udfJar = TestUserClassLoaderJar.createJarFile(
+ Utils.createTempDir("test-jar").toFile,
+ "test-classloader-udf.jar",
+ GENERATED_UDF_CLASS,
+ GENERATED_UDF_CODE)
+}
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationLocalSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationLocalSuite.scala
new file mode 100644
index 00000000000..e4e6a5c67ea
--- /dev/null
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationLocalSuite.scala
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.engine.flink.operation
+
+import org.apache.kyuubi.config.KyuubiConf._
+import org.apache.kyuubi.engine.flink.WithFlinkSQLEngineLocal
+import org.apache.kyuubi.operation.NoneMode
+
+class FlinkOperationLocalSuite extends FlinkOperationSuite
+ with WithFlinkSQLEngineLocal {
+
+ override def withKyuubiConf: Map[String, String] =
+ Map(OPERATION_PLAN_ONLY_MODE.key -> NoneMode.name)
+
+ override protected def jdbcUrl: String =
+ s"jdbc:hive2://${engine.frontendServices.head.connectionUrl}/;"
+
+}
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationOnYarnSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationOnYarnSuite.scala
new file mode 100644
index 00000000000..b43e83db6cc
--- /dev/null
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationOnYarnSuite.scala
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.engine.flink.operation
+
+import org.apache.kyuubi.engine.flink.WithDiscoveryFlinkSQLEngine
+
+class FlinkOperationOnYarnSuite extends FlinkOperationSuite
+ with WithDiscoveryFlinkSQLEngine {
+
+ protected def jdbcUrl: String = getFlinkEngineServiceUrl
+}
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala
index 8345d4f9feb..77ce3b3eebb 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala
@@ -25,34 +25,17 @@ import scala.collection.JavaConverters._
import org.apache.flink.api.common.JobID
import org.apache.flink.table.types.logical.LogicalTypeRoot
import org.apache.hive.service.rpc.thrift._
-import org.scalatest.concurrent.PatienceConfiguration.Timeout
-import org.scalatest.time.SpanSugar._
import org.apache.kyuubi.Utils
import org.apache.kyuubi.config.KyuubiConf._
-import org.apache.kyuubi.engine.flink.WithFlinkSQLEngine
+import org.apache.kyuubi.engine.flink.WithFlinkTestResources
import org.apache.kyuubi.engine.flink.result.Constants
import org.apache.kyuubi.engine.flink.util.TestUserClassLoaderJar
import org.apache.kyuubi.jdbc.hive.KyuubiStatement
-import org.apache.kyuubi.operation.{HiveJDBCTestHelper, NoneMode}
+import org.apache.kyuubi.operation.HiveJDBCTestHelper
import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._
-import org.apache.kyuubi.service.ServiceState._
-class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper {
- override def withKyuubiConf: Map[String, String] =
- Map(OPERATION_PLAN_ONLY_MODE.key -> NoneMode.name)
-
- override protected def jdbcUrl: String =
- s"jdbc:hive2://${engine.frontendServices.head.connectionUrl}/;"
-
- ignore("release session if shared level is CONNECTION") {
- logger.info(s"jdbc url is $jdbcUrl")
- assert(engine.getServiceState == STARTED)
- withJdbcStatement() { _ => }
- eventually(Timeout(20.seconds)) {
- assert(engine.getServiceState == STOPPED)
- }
- }
+abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTestResources {
test("get catalogs") {
withJdbcStatement() { statement =>
@@ -784,7 +767,8 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("select map ['k1', 'v1', 'k2', 'v2']")
assert(resultSet.next())
- assert(resultSet.getString(1) == "{k1=v1, k2=v2}")
+ assert(List("{k1=v1, k2=v2}", "{k2=v2, k1=v1}")
+ .contains(resultSet.getString(1)))
val metaData = resultSet.getMetaData
assert(metaData.getColumnType(1) === java.sql.Types.JAVA_OBJECT)
}
@@ -966,16 +950,34 @@ class FlinkOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper {
}
}
- test("execute statement - insert into") {
+ test("execute statement - batch insert into") {
withMultipleConnectionJdbcStatement() { statement =>
statement.executeQuery("create table tbl_a (a int) with ('connector' = 'blackhole')")
val resultSet = statement.executeQuery("insert into tbl_a select 1")
val metadata = resultSet.getMetaData
- assert(metadata.getColumnName(1) == "default_catalog.default_database.tbl_a")
- assert(metadata.getColumnType(1) == java.sql.Types.BIGINT)
+ assert(metadata.getColumnName(1) === "result")
+ assert(metadata.getColumnType(1) === java.sql.Types.VARCHAR)
assert(resultSet.next())
- assert(resultSet.getLong(1) == -1L)
- }
+ assert(resultSet.getString(1).length == 32)
+ };
+ }
+
+ test("execute statement - streaming insert into") {
+ withMultipleConnectionJdbcStatement()({ statement =>
+ // Flink currently doesn't support stop job statement, thus use a finite stream
+ statement.executeQuery(
+ "create table tbl_a (a int) with (" +
+ "'connector' = 'datagen', " +
+ "'rows-per-second'='10', " +
+ "'number-of-rows'='100')")
+ statement.executeQuery("create table tbl_b (a int) with ('connector' = 'blackhole')")
+ val resultSet = statement.executeQuery("insert into tbl_b select * from tbl_a")
+ val metadata = resultSet.getMetaData
+ assert(metadata.getColumnName(1) === "result")
+ assert(metadata.getColumnType(1) === java.sql.Types.VARCHAR)
+ assert(resultSet.next())
+ assert(resultSet.getString(1).length == 32)
+ })
}
test("execute statement - set properties") {
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala
index 1194f3582b1..1657f21f61d 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala
@@ -20,10 +20,10 @@ package org.apache.kyuubi.engine.flink.operation
import java.sql.Statement
import org.apache.kyuubi.config.KyuubiConf
-import org.apache.kyuubi.engine.flink.WithFlinkSQLEngine
+import org.apache.kyuubi.engine.flink.WithFlinkSQLEngineLocal
import org.apache.kyuubi.operation.{AnalyzeMode, ExecutionMode, HiveJDBCTestHelper, ParseMode, PhysicalMode}
-class PlanOnlyOperationSuite extends WithFlinkSQLEngine with HiveJDBCTestHelper {
+class PlanOnlyOperationSuite extends WithFlinkSQLEngineLocal with HiveJDBCTestHelper {
override def withKyuubiConf: Map[String, String] =
Map(
diff --git a/integration-tests/kyuubi-flink-it/pom.xml b/integration-tests/kyuubi-flink-it/pom.xml
index c6a55c62cb6..eada7841c73 100644
--- a/integration-tests/kyuubi-flink-it/pom.xml
+++ b/integration-tests/kyuubi-flink-it/pom.xml
@@ -79,6 +79,37 @@
      <scope>test</scope>
    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client-minicluster</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk15on</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcpkix-jdk15on</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>jakarta.activation</groupId>
+      <artifactId>jakarta.activation-api</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>jakarta.xml.bind</groupId>
+      <artifactId>jakarta.xml.bind-api</artifactId>
+      <scope>test</scope>
+    </dependency>
diff --git a/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/WithKyuubiServerAndYarnMiniCluster.scala b/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/WithKyuubiServerAndYarnMiniCluster.scala
new file mode 100644
index 00000000000..de9a8ae2d28
--- /dev/null
+++ b/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/WithKyuubiServerAndYarnMiniCluster.scala
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.it.flink
+
+import java.io.{File, FileWriter}
+import java.nio.file.Paths
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration
+
+import org.apache.kyuubi.{KyuubiFunSuite, Utils, WithKyuubiServer}
+import org.apache.kyuubi.config.KyuubiConf
+import org.apache.kyuubi.config.KyuubiConf.KYUUBI_ENGINE_ENV_PREFIX
+import org.apache.kyuubi.server.{MiniDFSService, MiniYarnService}
+
+trait WithKyuubiServerAndYarnMiniCluster extends KyuubiFunSuite with WithKyuubiServer {
+
+ val kyuubiHome: String = Utils.getCodeSourceLocation(getClass).split("integration-tests").head
+
+ override protected val conf: KyuubiConf = new KyuubiConf(false)
+
+ protected var miniHdfsService: MiniDFSService = _
+
+ protected var miniYarnService: MiniYarnService = _
+
+ private val yarnConf: YarnConfiguration = {
+ val yarnConfig = new YarnConfiguration()
+
+ // configurations copied from org.apache.flink.yarn.YarnTestBase
+ yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 32)
+ yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 4096)
+
+ yarnConfig.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true)
+ yarnConfig.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2)
+ yarnConfig.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 2)
+ yarnConfig.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 4)
+ yarnConfig.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 3600)
+ yarnConfig.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false)
+ // memory is overwritten in the MiniYARNCluster.
+ // so we have to change the number of cores for testing.
+ yarnConfig.setInt(YarnConfiguration.NM_VCORES, 666)
+ yarnConfig.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 99.0f)
+ yarnConfig.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 1000)
+ yarnConfig.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, 5000)
+
+ // capacity-scheduler.xml is missing in hadoop-client-minicluster so this is a workaround
+ yarnConfig.set("yarn.scheduler.capacity.root.queues", "default,four_cores_queue")
+
+ yarnConfig.setInt("yarn.scheduler.capacity.root.default.capacity", 100)
+ yarnConfig.setFloat("yarn.scheduler.capacity.root.default.user-limit-factor", 1)
+ yarnConfig.setInt("yarn.scheduler.capacity.root.default.maximum-capacity", 100)
+ yarnConfig.set("yarn.scheduler.capacity.root.default.state", "RUNNING")
+ yarnConfig.set("yarn.scheduler.capacity.root.default.acl_submit_applications", "*")
+ yarnConfig.set("yarn.scheduler.capacity.root.default.acl_administer_queue", "*")
+
+ yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-capacity", 100)
+ yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-applications", 10)
+ yarnConfig.setInt("yarn.scheduler.capacity.root.four_cores_queue.maximum-allocation-vcores", 4)
+ yarnConfig.setFloat("yarn.scheduler.capacity.root.four_cores_queue.user-limit-factor", 1)
+ yarnConfig.set("yarn.scheduler.capacity.root.four_cores_queue.acl_submit_applications", "*")
+ yarnConfig.set("yarn.scheduler.capacity.root.four_cores_queue.acl_administer_queue", "*")
+
+ yarnConfig.setInt("yarn.scheduler.capacity.node-locality-delay", -1)
+ // Set bind host to localhost to avoid java.net.BindException
+ yarnConfig.set(YarnConfiguration.RM_BIND_HOST, "localhost")
+ yarnConfig.set(YarnConfiguration.NM_BIND_HOST, "localhost")
+
+ yarnConfig
+ }
+
+ override def beforeAll(): Unit = {
+ miniHdfsService = new MiniDFSService()
+ miniHdfsService.initialize(conf)
+ miniHdfsService.start()
+
+ val hdfsServiceUrl = s"hdfs://localhost:${miniHdfsService.getDFSPort}"
+ yarnConf.set("fs.defaultFS", hdfsServiceUrl)
+ yarnConf.addResource(miniHdfsService.getHadoopConf)
+
+ val cp = System.getProperty("java.class.path")
+ // exclude kyuubi flink engine jar that has SPI for EmbeddedExecutorFactory
+ // which can't be initialized on the client side
+ val hadoopJars = cp.split(":").filter(s => !s.contains("flink"))
+ val hadoopClasspath = hadoopJars.mkString(":")
+ yarnConf.set("yarn.application.classpath", hadoopClasspath)
+
+ miniYarnService = new MiniYarnService()
+ miniYarnService.setYarnConf(yarnConf)
+ miniYarnService.initialize(conf)
+ miniYarnService.start()
+
+ val hadoopConfDir = Utils.createTempDir().toFile
+ val writer = new FileWriter(new File(hadoopConfDir, "core-site.xml"))
+ yarnConf.writeXml(writer)
+ writer.close()
+
+ val flinkHome = {
+ val candidates = Paths.get(kyuubiHome, "externals", "kyuubi-download", "target")
+ .toFile.listFiles(f => f.getName.contains("flink"))
+ if (candidates == null) None else candidates.map(_.toPath).headOption
+ }
+ if (flinkHome.isEmpty) {
+ throw new IllegalStateException(s"Flink home not found in $kyuubiHome/externals")
+ }
+
+ conf.set(s"$KYUUBI_ENGINE_ENV_PREFIX.KYUUBI_HOME", kyuubiHome)
+ conf.set(s"$KYUUBI_ENGINE_ENV_PREFIX.FLINK_HOME", flinkHome.get.toString)
+ conf.set(
+ s"$KYUUBI_ENGINE_ENV_PREFIX.FLINK_CONF_DIR",
+ s"${flinkHome.get.toString}${File.separator}conf")
+ conf.set(s"$KYUUBI_ENGINE_ENV_PREFIX.HADOOP_CLASSPATH", hadoopClasspath)
+ conf.set(s"$KYUUBI_ENGINE_ENV_PREFIX.HADOOP_CONF_DIR", hadoopConfDir.getAbsolutePath)
+ conf.set(s"flink.containerized.master.env.HADOOP_CLASSPATH", hadoopClasspath)
+ conf.set(s"flink.containerized.master.env.HADOOP_CONF_DIR", hadoopConfDir.getAbsolutePath)
+ conf.set(s"flink.containerized.taskmanager.env.HADOOP_CONF_DIR", hadoopConfDir.getAbsolutePath)
+
+ super.beforeAll()
+ }
+
+ override def afterAll(): Unit = {
+ super.afterAll()
+ if (miniYarnService != null) {
+ miniYarnService.stop()
+ miniYarnService = null
+ }
+ if (miniHdfsService != null) {
+ miniHdfsService.stop()
+ miniHdfsService = null
+ }
+ }
+}
diff --git a/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/operation/FlinkOperationSuiteOnYarn.scala b/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/operation/FlinkOperationSuiteOnYarn.scala
new file mode 100644
index 00000000000..afa4dce8f85
--- /dev/null
+++ b/integration-tests/kyuubi-flink-it/src/test/scala/org/apache/kyuubi/it/flink/operation/FlinkOperationSuiteOnYarn.scala
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.it.flink.operation
+
+import org.apache.hive.service.rpc.thrift.{TGetInfoReq, TGetInfoType}
+
+import org.apache.kyuubi.config.KyuubiConf
+import org.apache.kyuubi.config.KyuubiConf._
+import org.apache.kyuubi.it.flink.WithKyuubiServerAndYarnMiniCluster
+import org.apache.kyuubi.operation.HiveJDBCTestHelper
+import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_CAT
+
+class FlinkOperationSuiteOnYarn extends WithKyuubiServerAndYarnMiniCluster
+ with HiveJDBCTestHelper {
+
+ override protected def jdbcUrl: String = {
+ // delay the access to the thrift service because the thrift service
+ // may not be ready even though it's registered
+ Thread.sleep(3000L)
+ getJdbcUrl
+ }
+
+ override def beforeAll(): Unit = {
+ conf
+ .set(s"$KYUUBI_ENGINE_ENV_PREFIX.$KYUUBI_HOME", kyuubiHome)
+ .set(ENGINE_TYPE, "FLINK_SQL")
+ .set("flink.execution.target", "yarn-application")
+ .set("flink.parallelism.default", "6")
+ super.beforeAll()
+ }
+
+ test("get catalogs for flink sql") {
+ withJdbcStatement() { statement =>
+ val meta = statement.getConnection.getMetaData
+ val catalogs = meta.getCatalogs
+ val expected = Set("default_catalog").toIterator
+ while (catalogs.next()) {
+ assert(catalogs.getString(TABLE_CAT) === expected.next())
+ }
+ assert(!expected.hasNext)
+ assert(!catalogs.next())
+ }
+ }
+
+ test("execute statement - create/alter/drop table") {
+ withJdbcStatement() { statement =>
+ statement.executeQuery("create table tbl_a (a string) with ('connector' = 'blackhole')")
+ assert(statement.execute("alter table tbl_a rename to tbl_b"))
+ assert(statement.execute("drop table tbl_b"))
+ }
+ }
+
+ test("execute statement - select column name with dots") {
+ withJdbcStatement() { statement =>
+ val resultSet = statement.executeQuery("select 'tmp.hello'")
+ assert(resultSet.next())
+ assert(resultSet.getString(1) === "tmp.hello")
+ }
+ }
+
+ test("set kyuubi conf into flink conf") {
+ withJdbcStatement() { statement =>
+ val resultSet = statement.executeQuery("SET")
+ // Flink does not support set key without value currently,
+ // thus read all rows to find the desired one
+ var success = false
+ while (resultSet.next() && !success) {
+ if (resultSet.getString(1) == "parallelism.default" &&
+ resultSet.getString(2) == "6") {
+ success = true
+ }
+ }
+ assert(success)
+ }
+ }
+
+ test("server info provider - server") {
+ withSessionConf(Map(KyuubiConf.SERVER_INFO_PROVIDER.key -> "SERVER"))()() {
+ withSessionHandle { (client, handle) =>
+ val req = new TGetInfoReq()
+ req.setSessionHandle(handle)
+ req.setInfoType(TGetInfoType.CLI_DBMS_NAME)
+ assert(client.GetInfo(req).getInfoValue.getStringValue === "Apache Kyuubi")
+ }
+ }
+ }
+
+ test("server info provider - engine") {
+ withSessionConf(Map(KyuubiConf.SERVER_INFO_PROVIDER.key -> "ENGINE"))()() {
+ withSessionHandle { (client, handle) =>
+ val req = new TGetInfoReq()
+ req.setSessionHandle(handle)
+ req.setInfoType(TGetInfoType.CLI_DBMS_NAME)
+ assert(client.GetInfo(req).getInfoValue.getStringValue === "Apache Flink")
+ }
+ }
+ }
+}
diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
index b5229e2ad4f..2634bb4abc8 100644
--- a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
+++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
@@ -2367,14 +2367,14 @@ object KyuubiConf {
val ENGINE_FLINK_MEMORY: ConfigEntry[String] =
buildConf("kyuubi.engine.flink.memory")
- .doc("The heap memory for the Flink SQL engine")
+ .doc("The heap memory for the Flink SQL engine. Only effective in yarn session mode.")
.version("1.6.0")
.stringConf
.createWithDefault("1g")
val ENGINE_FLINK_JAVA_OPTIONS: OptionalConfigEntry[String] =
buildConf("kyuubi.engine.flink.java.options")
- .doc("The extra Java options for the Flink SQL engine")
+ .doc("The extra Java options for the Flink SQL engine. Only effective in yarn session mode.")
.version("1.6.0")
.stringConf
.createOptional
@@ -2382,11 +2382,19 @@ object KyuubiConf {
val ENGINE_FLINK_EXTRA_CLASSPATH: OptionalConfigEntry[String] =
buildConf("kyuubi.engine.flink.extra.classpath")
.doc("The extra classpath for the Flink SQL engine, for configuring the location" +
- " of hadoop client jars, etc")
+ " of hadoop client jars, etc. Only effective in yarn session mode.")
.version("1.6.0")
.stringConf
.createOptional
+ val ENGINE_FLINK_APPLICATION_JARS: OptionalConfigEntry[String] =
+ buildConf("kyuubi.engine.flink.application.jars")
+ .doc("A comma-separated list of the local jars to be shipped with the job to the cluster. " +
+ "For example, SQL UDF jars. Only effective in yarn application mode.")
+ .version("1.8.0")
+ .stringConf
+ .createOptional
+
val SERVER_LIMIT_CONNECTIONS_PER_USER: OptionalConfigEntry[Int] =
buildConf("kyuubi.server.limit.connections.per.user")
.doc("Maximum kyuubi server connections per user." +
diff --git a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/ServiceDiscovery.scala b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/ServiceDiscovery.scala
index bdb9b12fe82..a1b1466d122 100644
--- a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/ServiceDiscovery.scala
+++ b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/ServiceDiscovery.scala
@@ -60,6 +60,7 @@ abstract class ServiceDiscovery(
override def start(): Unit = {
discoveryClient.registerService(conf, namespace, this)
+ info(s"Registered $name in namespace ${_namespace}.")
super.start()
}
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala
index 63b37f1c5d8..b2b3ce9096a 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala
@@ -187,7 +187,7 @@ private[kyuubi] class EngineRef(
conf.setIfMissing(SparkProcessBuilder.APP_KEY, defaultEngineName)
new SparkProcessBuilder(appUser, conf, engineRefId, extraEngineLog)
case FLINK_SQL =>
- conf.setIfMissing(FlinkProcessBuilder.APP_KEY, defaultEngineName)
+ conf.setIfMissing(FlinkProcessBuilder.YARN_APP_KEY, defaultEngineName)
new FlinkProcessBuilder(appUser, conf, engineRefId, extraEngineLog)
case TRINO =>
new TrinoProcessBuilder(appUser, conf, engineRefId, extraEngineLog)
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala
index 9b23e550d07..02aed2866d6 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala
@@ -105,10 +105,10 @@ object KyuubiApplicationManager {
conf.set("spark.kubernetes.driver.label." + LABEL_KYUUBI_UNIQUE_KEY, tag)
}
- private def setupFlinkK8sTag(tag: String, conf: KyuubiConf): Unit = {
- val originalTag = conf.getOption(FlinkProcessBuilder.TAG_KEY).map(_ + ",").getOrElse("")
+ private def setupFlinkYarnTag(tag: String, conf: KyuubiConf): Unit = {
+ val originalTag = conf.getOption(FlinkProcessBuilder.YARN_TAG_KEY).map(_ + ",").getOrElse("")
val newTag = s"${originalTag}KYUUBI" + Some(tag).filterNot(_.isEmpty).map("," + _).getOrElse("")
- conf.set(FlinkProcessBuilder.TAG_KEY, newTag)
+ conf.set(FlinkProcessBuilder.YARN_TAG_KEY, newTag)
}
val uploadWorkDir: Path = {
@@ -178,7 +178,7 @@ object KyuubiApplicationManager {
setupSparkK8sTag(applicationTag, conf)
case ("FLINK", _) =>
// running flink on other platforms is not yet supported
- setupFlinkK8sTag(applicationTag, conf)
+ setupFlinkYarnTag(applicationTag, conf)
// other engine types are running locally yet
case _ =>
}
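For illustration, a minimal sketch of how `setupFlinkYarnTag` composes tags (the tag values below are made up):

```scala
// made-up values: an existing yarn.tags entry "billing" plus engine tag
// "kyuubi_abc" combine into "billing,KYUUBI,kyuubi_abc"
val originalTag = Option("billing").map(_ + ",").getOrElse("")
val tag = "kyuubi_abc"
val newTag = s"${originalTag}KYUUBI" + Some(tag).filterNot(_.isEmpty).map("," + _).getOrElse("")
assert(newTag == "billing,KYUUBI,kyuubi_abc")
```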
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala
index b8146c4d2b6..8642d87d7f6 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala
@@ -21,7 +21,7 @@ import java.io.{File, FilenameFilter}
import java.nio.file.{Files, Paths}
import scala.collection.JavaConverters._
-import scala.collection.mutable.ArrayBuffer
+import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import com.google.common.annotations.VisibleForTesting
@@ -50,88 +50,150 @@ class FlinkProcessBuilder(
val flinkHome: String = getEngineHome(shortName)
+ val flinkExecutable: String = {
+ Paths.get(flinkHome, "bin", FLINK_EXEC_FILE).toFile.getCanonicalPath
+ }
+
override protected def module: String = "kyuubi-flink-sql-engine"
override protected def mainClass: String = "org.apache.kyuubi.engine.flink.FlinkSQLEngine"
override def env: Map[String, String] = conf.getEnvs +
- (FLINK_PROXY_USER_KEY -> proxyUser)
+ ("FLINK_CONF_DIR" -> conf.getEnvs.getOrElse(
+ "FLINK_CONF_DIR",
+ s"$flinkHome${File.separator}conf"))
+
+ override def clusterManager(): Option[String] = Some("yarn")
override protected val commands: Array[String] = {
KyuubiApplicationManager.tagApplication(engineRefId, shortName, clusterManager(), conf)
- val buffer = new ArrayBuffer[String]()
- buffer += executable
-
- val memory = conf.get(ENGINE_FLINK_MEMORY)
- buffer += s"-Xmx$memory"
- val javaOptions = conf.get(ENGINE_FLINK_JAVA_OPTIONS)
- if (javaOptions.isDefined) {
- buffer += javaOptions.get
- }
- buffer += "-cp"
- val classpathEntries = new java.util.LinkedHashSet[String]
- // flink engine runtime jar
- mainResource.foreach(classpathEntries.add)
- // flink sql client jar
- val flinkSqlClientPath = Paths.get(flinkHome)
- .resolve("opt")
- .toFile
- .listFiles(new FilenameFilter {
- override def accept(dir: File, name: String): Boolean = {
- name.toLowerCase.startsWith("flink-sql-client")
+ // flink.execution.target is required in the Kyuubi conf currently
+ val executionTarget = conf.getOption("flink.execution.target")
+ executionTarget match {
+ case Some("yarn-application") =>
+ val buffer = new ArrayBuffer[String]()
+ buffer += flinkExecutable
+ buffer += "run-application"
+
+ val flinkExtraJars = new ListBuffer[String]
+ // locate flink sql jars
+ val flinkSqlJars = Paths.get(flinkHome)
+ .resolve("opt")
+ .toFile
+ .listFiles(new FilenameFilter {
+ override def accept(dir: File, name: String): Boolean = {
+ name.toLowerCase.startsWith("flink-sql-client") ||
+ name.toLowerCase.startsWith("flink-sql-gateway")
+ }
+ }).map(f => f.getAbsolutePath).sorted
+ flinkExtraJars ++= flinkSqlJars
+
+ val userJars = conf.get(ENGINE_FLINK_APPLICATION_JARS)
+ userJars.foreach(jars => flinkExtraJars ++= jars.split(","))
+
+ buffer += "-t"
+ buffer += "yarn-application"
+ buffer += s"-Dyarn.ship-files=${flinkExtraJars.mkString(";")}"
+ buffer += s"-Dyarn.tags=${conf.getOption(YARN_TAG_KEY).get}"
+ buffer += "-Dcontainerized.master.env.FLINK_CONF_DIR=."
+
+ val customFlinkConf = conf.getAllWithPrefix("flink", "")
+ customFlinkConf.foreach { case (k, v) =>
+ buffer += s"-D$k=$v"
}
- }).head.getAbsolutePath
- classpathEntries.add(flinkSqlClientPath)
-
- // jars from flink lib
- classpathEntries.add(s"$flinkHome${File.separator}lib${File.separator}*")
-
- // classpath contains flink configurations, default to flink.home/conf
- classpathEntries.add(env.getOrElse("FLINK_CONF_DIR", s"$flinkHome${File.separator}conf"))
- // classpath contains hadoop configurations
- env.get("HADOOP_CONF_DIR").foreach(classpathEntries.add)
- env.get("YARN_CONF_DIR").foreach(classpathEntries.add)
- env.get("HBASE_CONF_DIR").foreach(classpathEntries.add)
- val hadoopCp = env.get(FLINK_HADOOP_CLASSPATH_KEY)
- hadoopCp.foreach(classpathEntries.add)
- val extraCp = conf.get(ENGINE_FLINK_EXTRA_CLASSPATH)
- extraCp.foreach(classpathEntries.add)
- if (hadoopCp.isEmpty && extraCp.isEmpty) {
- warn(s"The conf of ${FLINK_HADOOP_CLASSPATH_KEY} and ${ENGINE_FLINK_EXTRA_CLASSPATH.key}" +
- s" is empty.")
- debug("Detected development environment")
- mainResource.foreach { path =>
- val devHadoopJars = Paths.get(path).getParent
- .resolve(s"scala-$SCALA_COMPILE_VERSION")
- .resolve("jars")
- if (!Files.exists(devHadoopJars)) {
- throw new KyuubiException(s"The path $devHadoopJars does not exists. " +
- s"Please set ${FLINK_HADOOP_CLASSPATH_KEY} or ${ENGINE_FLINK_EXTRA_CLASSPATH.key} " +
- s"for configuring location of hadoop client jars, etc")
+
+ buffer += "-c"
+ buffer += s"$mainClass"
+ buffer += s"${mainResource.get}"
+
+ buffer += "--conf"
+ buffer += s"$KYUUBI_SESSION_USER_KEY=$proxyUser"
+ conf.getAll.foreach { case (k, v) =>
+ if (k.startsWith("kyuubi.")) {
+ buffer += "--conf"
+ buffer += s"$k=$v"
+ }
}
- classpathEntries.add(s"$devHadoopJars${File.separator}*")
- }
- }
- buffer += classpathEntries.asScala.mkString(File.pathSeparator)
- buffer += mainClass
- buffer += "--conf"
- buffer += s"$KYUUBI_SESSION_USER_KEY=$proxyUser"
+ buffer.toArray
+
+ case _ =>
+ val buffer = new ArrayBuffer[String]()
+ buffer += executable
- for ((k, v) <- conf.getAll) {
- buffer += "--conf"
- buffer += s"$k=$v"
+ val memory = conf.get(ENGINE_FLINK_MEMORY)
+ buffer += s"-Xmx$memory"
+ val javaOptions = conf.get(ENGINE_FLINK_JAVA_OPTIONS)
+ if (javaOptions.isDefined) {
+ buffer += javaOptions.get
+ }
+
+ buffer += "-cp"
+ val classpathEntries = new java.util.LinkedHashSet[String]
+ // flink engine runtime jar
+ mainResource.foreach(classpathEntries.add)
+ // flink sql client jar
+ val flinkSqlClientPath = Paths.get(flinkHome)
+ .resolve("opt")
+ .toFile
+ .listFiles(new FilenameFilter {
+ override def accept(dir: File, name: String): Boolean = {
+ name.toLowerCase.startsWith("flink-sql-client")
+ }
+ }).head.getAbsolutePath
+ classpathEntries.add(flinkSqlClientPath)
+
+ // jars from flink lib
+ classpathEntries.add(s"$flinkHome${File.separator}lib${File.separator}*")
+
+ // classpath contains flink configurations, default to flink.home/conf
+ classpathEntries.add(env.getOrElse("FLINK_CONF_DIR", s"$flinkHome${File.separator}conf"))
+ // classpath contains hadoop configurations
+ env.get("HADOOP_CONF_DIR").foreach(classpathEntries.add)
+ env.get("YARN_CONF_DIR").foreach(classpathEntries.add)
+ env.get("HBASE_CONF_DIR").foreach(classpathEntries.add)
+ val hadoopCp = env.get(FLINK_HADOOP_CLASSPATH_KEY)
+ hadoopCp.foreach(classpathEntries.add)
+ val extraCp = conf.get(ENGINE_FLINK_EXTRA_CLASSPATH)
+ extraCp.foreach(classpathEntries.add)
+ if (hadoopCp.isEmpty && extraCp.isEmpty) {
+ warn(s"The conf of ${FLINK_HADOOP_CLASSPATH_KEY} and " +
+ s"${ENGINE_FLINK_EXTRA_CLASSPATH.key} is empty.")
+ debug("Detected development environment.")
+ mainResource.foreach { path =>
+ val devHadoopJars = Paths.get(path).getParent
+ .resolve(s"scala-$SCALA_COMPILE_VERSION")
+ .resolve("jars")
+ if (!Files.exists(devHadoopJars)) {
+ throw new KyuubiException(s"The path $devHadoopJars does not exist. " +
+ s"Please set ${FLINK_HADOOP_CLASSPATH_KEY} or ${ENGINE_FLINK_EXTRA_CLASSPATH.key}" +
+ s" for configuring the location of hadoop client jars, etc.")
+ }
+ classpathEntries.add(s"$devHadoopJars${File.separator}*")
+ }
+ }
+ buffer += classpathEntries.asScala.mkString(File.pathSeparator)
+ buffer += mainClass
+
+ buffer += "--conf"
+ buffer += s"$KYUUBI_SESSION_USER_KEY=$proxyUser"
+
+ conf.getAll.foreach { case (k, v) =>
+ buffer += "--conf"
+ buffer += s"$k=$v"
+ }
+ buffer.toArray
}
- buffer.toArray
}
override def shortName: String = "flink"
}
object FlinkProcessBuilder {
- final val APP_KEY = "yarn.application.name"
- final val TAG_KEY = "yarn.tags"
+ final val FLINK_EXEC_FILE = "flink"
+ final val YARN_APP_KEY = "yarn.application.name"
+ final val YARN_TAG_KEY = "yarn.tags"
final val FLINK_HADOOP_CLASSPATH_KEY = "FLINK_HADOOP_CLASSPATH"
final val FLINK_PROXY_USER_KEY = "HADOOP_PROXY_USER"
}
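To make the yarn-application branch concrete, here is roughly the argv it assembles; every path, jar version, tag, and user below is illustrative, not taken from a real run:

```scala
// a rough sketch of the command produced in yarn-application mode;
// paths, versions, and the session user are hypothetical
Array(
  "/opt/flink/bin/flink", "run-application",
  "-t", "yarn-application",
  "-Dyarn.ship-files=/opt/flink/opt/flink-sql-client-1.16.1.jar;" +
    "/opt/flink/opt/flink-sql-gateway-1.16.1.jar",
  "-Dyarn.tags=KYUUBI,kyuubi_abc",
  "-Dcontainerized.master.env.FLINK_CONF_DIR=.",
  "-Dexecution.target=yarn-application",
  "-c", "org.apache.kyuubi.engine.flink.FlinkSQLEngine",
  "/opt/kyuubi/externals/engines/flink/kyuubi-flink-sql-engine.jar",
  "--conf", "kyuubi.session.user=bob")
```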
diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala
index 7ee38d4ef99..53450b5897a 100644
--- a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala
+++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala
@@ -18,6 +18,7 @@
package org.apache.kyuubi.engine.flink
import java.io.File
+import java.nio.file.{Files, Paths}
import scala.collection.JavaConverters._
import scala.collection.immutable.ListMap
@@ -25,18 +26,36 @@ import scala.util.matching.Regex
import org.apache.kyuubi.KyuubiFunSuite
import org.apache.kyuubi.config.KyuubiConf
-import org.apache.kyuubi.config.KyuubiConf.{ENGINE_FLINK_EXTRA_CLASSPATH, ENGINE_FLINK_JAVA_OPTIONS, ENGINE_FLINK_MEMORY}
+import org.apache.kyuubi.config.KyuubiConf.{ENGINE_FLINK_APPLICATION_JARS, ENGINE_FLINK_EXTRA_CLASSPATH, ENGINE_FLINK_JAVA_OPTIONS, ENGINE_FLINK_MEMORY}
import org.apache.kyuubi.engine.flink.FlinkProcessBuilder._
class FlinkProcessBuilderSuite extends KyuubiFunSuite {
- private def conf = KyuubiConf().set("kyuubi.on", "off")
+ private def sessionModeConf = KyuubiConf()
+ .set("flink.execution.target", "yarn-session")
+ .set("kyuubi.on", "off")
.set(ENGINE_FLINK_MEMORY, "512m")
.set(
ENGINE_FLINK_JAVA_OPTIONS,
"-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005")
+ private def applicationModeConf = KyuubiConf()
+ .set("flink.execution.target", "yarn-application")
+ .set(ENGINE_FLINK_APPLICATION_JARS, tempUdfJar.toString)
+ .set("kyuubi.on", "off")
+
+ private val tempFlinkHome = Files.createTempDirectory("flink-home").toFile
+ private val tempOpt =
+ Files.createDirectories(Paths.get(tempFlinkHome.toPath.toString, "opt")).toFile
+ Files.createFile(Paths.get(tempOpt.toPath.toString, "flink-sql-client-1.16.1.jar"))
+ Files.createFile(Paths.get(tempOpt.toPath.toString, "flink-sql-gateway-1.16.1.jar"))
+ private val tempUsrLib =
+ Files.createDirectories(Paths.get(tempFlinkHome.toPath.toString, "usrlib")).toFile
+ private val tempUdfJar =
+ Files.createFile(Paths.get(tempUsrLib.toPath.toString, "test-udf.jar"))
+
private def envDefault: ListMap[String, String] = ListMap(
- "JAVA_HOME" -> s"${File.separator}jdk")
+ "JAVA_HOME" -> s"${File.separator}jdk",
+ "FLINK_HOME" -> s"${tempFlinkHome.toPath}")
private def envWithoutHadoopCLASSPATH: ListMap[String, String] = envDefault +
("HADOOP_CONF_DIR" -> s"${File.separator}hadoop${File.separator}conf") +
("YARN_CONF_DIR" -> s"${File.separator}yarn${File.separator}conf") +
@@ -44,11 +63,12 @@ class FlinkProcessBuilderSuite extends KyuubiFunSuite {
private def envWithAllHadoop: ListMap[String, String] = envWithoutHadoopCLASSPATH +
(FLINK_HADOOP_CLASSPATH_KEY -> s"${File.separator}hadoop")
private def confStr: String = {
- conf.clone.set("yarn.tags", "KYUUBI").getAll
+ sessionModeConf.clone.set("yarn.tags", "KYUUBI").getAll
.map { case (k, v) => s"\\\\\\n\\t--conf $k=$v" }
.mkString(" ")
}
- private def matchActualAndExpected(builder: FlinkProcessBuilder): Unit = {
+
+ private def matchActualAndExpectedSessionMode(builder: FlinkProcessBuilder): Unit = {
val actualCommands = builder.toString
val classpathStr = constructClasspathStr(builder)
val expectedCommands =
@@ -59,6 +79,27 @@ class FlinkProcessBuilderSuite extends KyuubiFunSuite {
assert(matcher.matches())
}
+ private def matchActualAndExpectedApplicationMode(builder: FlinkProcessBuilder): Unit = {
+ val actualCommands = builder.toString
+ val expectedCommands =
+ escapePaths(s"${builder.flinkExecutable} run-application ") +
+ s"-t yarn-application " +
+ s"-Dyarn.ship-files=.*\\/flink-sql-client.*jar;.*\\/flink-sql-gateway.*jar;$tempUdfJar " +
+ s"-Dyarn\\.tags=KYUUBI " +
+ s"-Dcontainerized\\.master\\.env\\.FLINK_CONF_DIR=\\. " +
+ s"-Dexecution.target=yarn-application " +
+ s"-c org\\.apache\\.kyuubi\\.engine\\.flink\\.FlinkSQLEngine " +
+ s".*kyuubi-flink-sql-engine_.*jar" +
+ s"(?: \\\\\\n\\t--conf \\S+=\\S+)+"
+ val regex = new Regex(expectedCommands)
+ val matcher = regex.pattern.matcher(actualCommands)
+ assert(matcher.matches())
+ }
+
+ private def escapePaths(path: String): String = {
+ path.replaceAll("/", "\\/")
+ }
+
private def constructClasspathStr(builder: FlinkProcessBuilder) = {
val classpathEntries = new java.util.LinkedHashSet[String]
builder.mainResource.foreach(classpathEntries.add)
@@ -69,11 +110,11 @@ class FlinkProcessBuilderSuite extends KyuubiFunSuite {
classpathEntries.add(s"$flinkHome$flinkConfPathSuffix")
val envMap = builder.env
envMap.foreach { case (k, v) =>
- if (!k.equals("JAVA_HOME")) {
+ if (!k.equals("JAVA_HOME") && !k.equals("FLINK_HOME")) {
classpathEntries.add(v)
}
}
- val extraCp = conf.get(ENGINE_FLINK_EXTRA_CLASSPATH)
+ val extraCp = sessionModeConf.get(ENGINE_FLINK_EXTRA_CLASSPATH)
extraCp.foreach(classpathEntries.add)
val classpathStr = classpathEntries.asScala.mkString(File.pathSeparator)
classpathStr
@@ -86,18 +127,25 @@ class FlinkProcessBuilderSuite extends KyuubiFunSuite {
private val flinkConfPathSuffix = s"${File.separator}conf"
private val mainClassStr = "org.apache.kyuubi.engine.flink.FlinkSQLEngine"
- test("all hadoop related environment variables are configured") {
- val builder = new FlinkProcessBuilder("vinoyang", conf) {
+ test("session mode - all hadoop related environment variables are configured") {
+ val builder = new FlinkProcessBuilder("vinoyang", sessionModeConf) {
override def env: Map[String, String] = envWithAllHadoop
}
- matchActualAndExpected(builder)
+ matchActualAndExpectedSessionMode(builder)
}
- test("only FLINK_HADOOP_CLASSPATH environment variables are configured") {
- val builder = new FlinkProcessBuilder("vinoyang", conf) {
+ test("session mode - only FLINK_HADOOP_CLASSPATH environment variables are configured") {
+ val builder = new FlinkProcessBuilder("vinoyang", sessionModeConf) {
override def env: Map[String, String] = envDefault +
(FLINK_HADOOP_CLASSPATH_KEY -> s"${File.separator}hadoop")
}
- matchActualAndExpected(builder)
+ matchActualAndExpectedSessionMode(builder)
+ }
+
+ test("application mode - default env") {
+ val builder = new FlinkProcessBuilder("paullam", applicationModeConf) {
+ override def env: Map[String, String] = envDefault
+ }
+ matchActualAndExpectedApplicationMode(builder)
}
}
diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/MiniYarnService.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/MiniYarnService.scala
index 1a73cc24ca0..68a175efc4e 100644
--- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/MiniYarnService.scala
+++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/MiniYarnService.scala
@@ -34,7 +34,7 @@ import org.apache.kyuubi.service.AbstractService
class MiniYarnService extends AbstractService("TestMiniYarnService") {
private val hadoopConfDir: File = Utils.createTempDir().toFile
- private val yarnConf: YarnConfiguration = {
+ private var yarnConf: YarnConfiguration = {
val yarnConfig = new YarnConfiguration()
// Disable the disk utilization check to avoid the test hanging when people's disks are
// getting full.
@@ -71,6 +71,10 @@ class MiniYarnService extends AbstractService("TestMiniYarnService") {
}
private val yarnCluster: MiniYARNCluster = new MiniYARNCluster(getName, 1, 1, 1)
+ def setYarnConf(yarnConf: YarnConfiguration): Unit = {
+ this.yarnConf = yarnConf
+ }
+
override def initialize(conf: KyuubiConf): Unit = {
yarnCluster.init(yarnConf)
super.initialize(conf)
From 5faebb1e75b57491ca655e0ba6fceacde5ef9459 Mon Sep 17 00:00:00 2001
From: Karsonnel <747100667@qq.com>
Date: Fri, 7 Apr 2023 18:54:14 +0800
Subject: [PATCH 016/404] [KYUUBI #4658][FOLLOWUP] Improve unit tests
### _Why are the changes needed?_
To follow up on the reviewer's comment in https://github.com/apache/kyuubi/issues/4660.
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4661 from Karsonnel/4658-authz-insert-follow-up.
Closes #4658
3ce7efc3d [Karsonnel] add e2e test for InsertIntoDatasourceCommand
2c8e3469a [Karsonnel] rename test
1349c2b02 [Karsonnel] fix test assert text
d2f04ca45 [Karsonnel] fix test
8f86bb14b [Karsonnel] Resolve reviewer's comment in pr #4660
Authored-by: Karsonnel <747100667@qq.com>
Signed-off-by: Cheng Pan
---
.../ranger/RangerSparkExtensionSuite.scala | 25 ++++++++++++++++---
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala
index 2d108615e4c..beef36d5dda 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala
@@ -708,20 +708,37 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
}
}
- test("[KYUUBI #4658] INSERT OVERWRITE DIRECTORY did check query permission") {
+ test("[KYUUBI #4658] insert overwrite hive directory") {
val db1 = "default"
val table = "src"
withCleanTmpResources(Seq((s"$db1.$table", "table"))) {
- doAs("bob", sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)"))
- val e1 = intercept[AccessControlException](
+ doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)"))
+ val e = intercept[AccessControlException](
doAs(
"someone",
sql(
s"""INSERT OVERWRITE DIRECTORY '/tmp/test_dir' ROW FORMAT DELIMITED FIELDS
| TERMINATED BY ','
| SELECT * FROM $db1.$table;""".stripMargin)))
- assert(e1.getMessage.contains(s"does not have [select] privilege on [$db1/$table/id"))
+ assert(e.getMessage.contains(s"does not have [select] privilege on [$db1/$table/id]"))
+ }
+ }
+
+ test("[KYUUBI #4658] insert overwrite datasource directory") {
+ val db1 = "default"
+ val table = "src"
+
+ withCleanTmpResources(Seq((s"$db1.$table", "table"))) {
+ doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)"))
+ val e = intercept[AccessControlException](
+ doAs(
+ "someone",
+ sql(
+ s"""INSERT OVERWRITE DIRECTORY '/tmp/test_dir'
+ | USING parquet
+ | SELECT * FROM $db1.$table;""".stripMargin)))
+ assert(e.getMessage.contains(s"does not have [select] privilege on [$db1/$table/id]"))
}
}
}
From 1a651254cb9dec71082e9cfadd58a4dbbd976d1f Mon Sep 17 00:00:00 2001
From: Fu Chen
Date: Mon, 10 Apr 2023 09:43:30 +0800
Subject: [PATCH 017/404] [KYUUBI #4662] [ARROW] Arrow serialization should not
introduce extra shuffle for outermost limit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
### _Why are the changes needed?_
The fundamental idea is to execute the job the way `CollectLimitExec.executeCollect()` does: scan only as many partitions as needed to satisfy the limit, instead of computing and shuffling the full result.
```sql
select * from parquet.`parquet/tpcds/sf1000/catalog_sales` limit 20;
```
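A minimal, self-contained sketch of that incremental strategy over plain Scala collections (the partition layout, sizes, and names are made up, not the actual Kyuubi/Spark code):

```scala
// toy model of CollectLimitExec.executeCollect()-style incremental take:
// scan partitions in growing waves and stop once `limit` rows are buffered,
// instead of shuffling the entire result to a single partition
object IncrementalTakeSketch extends App {
  val partitions: Seq[Seq[Int]] = (0 until 10).map(p => Seq.tabulate(100)(i => p * 100 + i))
  val limit = 230

  var buffered = Vector.empty[Int]
  var partsScanned = 0
  var numPartsToTry = 1
  while (buffered.size < limit && partsScanned < partitions.size) {
    val upTo = math.min(partsScanned + numPartsToTry, partitions.size)
    val wave = partitions.slice(partsScanned, upTo)
    buffered ++= wave.flatten.take(limit - buffered.size)
    partsScanned = upTo
    numPartsToTry *= 2 // scale up the next wave, like SparkPlan.executeTake
  }
  println(s"scanned $partsScanned of ${partitions.size} partitions to collect ${buffered.size} rows")
}
```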
Before this PR:
![Screenshot 2023-04-04 15:20:34](https://user-images.githubusercontent.com/8537877/229717946-87c480c6-9550-4d00-bc96-14d59d7ce9f7.png)
![Screenshot 2023-04-04 15:20:54](https://user-images.githubusercontent.com/8537877/229717973-bf6da5af-74e7-422a-b9fa-8b7bebd43320.png)
After this PR:
![Screenshot 2023-04-04 15:17:05](https://user-images.githubusercontent.com/8537877/229718016-6218d019-b223-4deb-b596-6f0431d33d2a.png)
![Screenshot 2023-04-04 15:17:16](https://user-images.githubusercontent.com/8537877/229718046-ea07cd1f-5ffc-42ba-87d5-08085feb4b16.png)
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4662 from cfmcgrady/arrow-collect-limit-exec-2.
Closes #4662
82c912ed6 [Fu Chen] close vector
130bcb141 [Fu Chen] finally close
facc13f78 [Fu Chen] exclude rule OptimizeLimitZero
370083910 [Fu Chen] SparkArrowbasedOperationSuite adapt Spark-3.1.x
6064ab961 [Fu Chen] limit = 0 test case
6d596fcce [Fu Chen] address comment
8280783c3 [Fu Chen] add `isStaticConfigKey` to adapt Spark-3.1.x
22cc70fba [Fu Chen] add ut
b72bc6fb2 [Fu Chen] add offset support to adapt Spark-3.4.x
9ffb44fb2 [Fu Chen] make toBatchIterator private
c83cf3f5e [Fu Chen] SparkArrowbasedOperationSuite adapt Spark-3.1.x
573a262ed [Fu Chen] fix
4cef20481 [Fu Chen] SparkArrowbasedOperationSuite adapt Spark-3.1.x
d70aee36b [Fu Chen] SparkPlan.session -> SparkSession.active to adapt Spark-3.1.x
e3bf84c03 [Fu Chen] refactor
81886f01c [Fu Chen] address comment
2286afc6b [Fu Chen] reflective calla AdaptiveSparkPlanExec.finalPhysicalPlan
03d074732 [Fu Chen] address comment
25e4f056c [Fu Chen] add docs
885cf2c71 [Fu Chen] infer row size by schema.defaultSize
4e7ca54df [Fu Chen] unnecessarily changes
ee5a7567a [Fu Chen] revert unnecessarily changes
6c5b1eb61 [Fu Chen] add ut
4212a8967 [Fu Chen] refactor and add ut
ed8c6928b [Fu Chen] refactor
008867122 [Fu Chen] refine
8593d856a [Fu Chen] driver slice last batch
a5849430a [Fu Chen] arrow take
Authored-by: Fu Chen
Signed-off-by: ulyssesyou
---
externals/kyuubi-spark-sql-engine/pom.xml | 7 +
.../spark/operation/ExecuteStatement.scala | 32 +-
.../arrow/KyuubiArrowConverters.scala | 321 ++++++++++++++++++
.../spark/sql/kyuubi/SparkDatasetHelper.scala | 160 ++++++++-
.../SparkArrowbasedOperationSuite.scala | 260 +++++++++++++-
.../spark/KyuubiSparkContextHelper.scala | 2 +
pom.xml | 4 +-
7 files changed, 753 insertions(+), 33 deletions(-)
create mode 100644 externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala
diff --git a/externals/kyuubi-spark-sql-engine/pom.xml b/externals/kyuubi-spark-sql-engine/pom.xml
index 5b227cb5e29..8c984e4cab4 100644
--- a/externals/kyuubi-spark-sql-engine/pom.xml
+++ b/externals/kyuubi-spark-sql-engine/pom.xml
@@ -65,6 +65,13 @@
provided
+
+ org.apache.spark
+ spark-sql_${scala.binary.version}
+ test-jar
+ test
+
+
org.apache.sparkspark-repl_${scala.binary.version}
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteStatement.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteStatement.scala
index b29d2ca9a7e..ca30f53001f 100644
--- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteStatement.scala
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/ExecuteStatement.scala
@@ -21,10 +21,8 @@ import java.util.concurrent.RejectedExecutionException
import scala.collection.JavaConverters._
-import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame
-import org.apache.spark.sql.execution.SQLExecution
-import org.apache.spark.sql.kyuubi.SparkDatasetHelper
+import org.apache.spark.sql.kyuubi.SparkDatasetHelper._
import org.apache.spark.sql.types._
import org.apache.kyuubi.{KyuubiSQLException, Logging}
@@ -187,34 +185,15 @@ class ArrowBasedExecuteStatement(
handle) {
override protected def incrementalCollectResult(resultDF: DataFrame): Iterator[Any] = {
- collectAsArrow(convertComplexType(resultDF)) { rdd =>
- rdd.toLocalIterator
- }
+ toArrowBatchLocalIterator(convertComplexType(resultDF))
}
override protected def fullCollectResult(resultDF: DataFrame): Array[_] = {
- collectAsArrow(convertComplexType(resultDF)) { rdd =>
- rdd.collect()
- }
+ executeCollect(convertComplexType(resultDF))
}
override protected def takeResult(resultDF: DataFrame, maxRows: Int): Array[_] = {
- // this will introduce shuffle and hurt performance
- val limitedResult = resultDF.limit(maxRows)
- collectAsArrow(convertComplexType(limitedResult)) { rdd =>
- rdd.collect()
- }
- }
-
- /**
- * refer to org.apache.spark.sql.Dataset#withAction(), assign a new execution id for arrow-based
- * operation, so that we can track the arrow-based queries on the UI tab.
- */
- private def collectAsArrow[T](df: DataFrame)(action: RDD[Array[Byte]] => T): T = {
- SQLExecution.withNewExecutionId(df.queryExecution, Some("collectAsArrow")) {
- df.queryExecution.executedPlan.resetMetrics()
- action(SparkDatasetHelper.toArrowBatchRdd(df))
- }
+ executeCollect(convertComplexType(resultDF.limit(maxRows)))
}
override protected def isArrowBasedOperation: Boolean = true
@@ -222,7 +201,6 @@ class ArrowBasedExecuteStatement(
override val resultFormat = "arrow"
private def convertComplexType(df: DataFrame): DataFrame = {
- SparkDatasetHelper.convertTopLevelComplexTypeToHiveString(df, timestampAsString)
+ convertTopLevelComplexTypeToHiveString(df, timestampAsString)
}
-
}
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala
new file mode 100644
index 00000000000..dd6163ec97c
--- /dev/null
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala
@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.arrow
+
+import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
+import java.nio.channels.Channels
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable.ArrayBuffer
+
+import org.apache.arrow.vector._
+import org.apache.arrow.vector.ipc.{ArrowStreamWriter, ReadChannel, WriteChannel}
+import org.apache.arrow.vector.ipc.message.{IpcOption, MessageSerializer}
+import org.apache.spark.TaskContext
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.{InternalRow, SQLConfHelper}
+import org.apache.spark.sql.catalyst.expressions.UnsafeRow
+import org.apache.spark.sql.execution.CollectLimitExec
+import org.apache.spark.sql.types._
+import org.apache.spark.sql.util.ArrowUtils
+import org.apache.spark.util.Utils
+
+object KyuubiArrowConverters extends SQLConfHelper with Logging {
+
+ type Batch = (Array[Byte], Long)
+
+ /**
+ * This method slices the input Arrow record batch byte array `bytes`, starting
+ * at `start` and taking `length` elements.
+ */
+ def slice(
+ schema: StructType,
+ timeZoneId: String,
+ bytes: Array[Byte],
+ start: Int,
+ length: Int): Array[Byte] = {
+ val in = new ByteArrayInputStream(bytes)
+ val out = new ByteArrayOutputStream(bytes.length)
+
+ var vectorSchemaRoot: VectorSchemaRoot = null
+ var slicedVectorSchemaRoot: VectorSchemaRoot = null
+
+ val sliceAllocator = ArrowUtils.rootAllocator.newChildAllocator(
+ "slice",
+ 0,
+ Long.MaxValue)
+ val arrowSchema = ArrowUtils.toArrowSchema(schema, timeZoneId)
+ vectorSchemaRoot = VectorSchemaRoot.create(arrowSchema, sliceAllocator)
+ try {
+ val recordBatch = MessageSerializer.deserializeRecordBatch(
+ new ReadChannel(Channels.newChannel(in)),
+ sliceAllocator)
+ val vectorLoader = new VectorLoader(vectorSchemaRoot)
+ vectorLoader.load(recordBatch)
+ recordBatch.close()
+ slicedVectorSchemaRoot = vectorSchemaRoot.slice(start, length)
+
+ val unloader = new VectorUnloader(slicedVectorSchemaRoot)
+ val writeChannel = new WriteChannel(Channels.newChannel(out))
+ val batch = unloader.getRecordBatch()
+ MessageSerializer.serialize(writeChannel, batch)
+ batch.close()
+ out.toByteArray()
+ } finally {
+ in.close()
+ out.close()
+ if (vectorSchemaRoot != null) {
+ vectorSchemaRoot.getFieldVectors.asScala.foreach(_.close())
+ vectorSchemaRoot.close()
+ }
+ if (slicedVectorSchemaRoot != null) {
+ slicedVectorSchemaRoot.getFieldVectors.asScala.foreach(_.close())
+ slicedVectorSchemaRoot.close()
+ }
+ sliceAllocator.close()
+ }
+ }
+
+ /**
+ * Forked from `org.apache.spark.sql.execution.SparkPlan#executeTake()`, the algorithm can be
+ * summarized in the following steps:
+ * 1. If the limit specified in the CollectLimitExec object is 0, the function returns an empty
+ * array of batches.
+ * 2. Otherwise, execute the child query plan of the CollectLimitExec object to obtain an RDD of
+ * data to collect.
+ * 3. Use an iterative approach to collect data in batches until the specified limit is reached.
+ * In each iteration, it selects a subset of the partitions of the RDD to scan and tries to
+ * collect data from them.
+ * 4. For each partition subset, we use the runJob method of the Spark context to execute a
+ * closure that scans the partition data and converts it to Arrow batches.
+ * 5. Check if the collected data reaches the specified limit. If not, it selects another subset
+ * of partitions to scan and repeats the process until the limit is reached or all partitions
+ * have been scanned.
+ * 6. Return an array of all the collected Arrow batches.
+ *
+ * Note that:
+ * 1. The total row count of the returned Arrow batches may be >= `limit`, if the input
+ * df has more than `limit` rows
+ * 2. We don't implement the `takeFromEnd` logic
+ *
+ * @return
+ */
+ def takeAsArrowBatches(
+ collectLimitExec: CollectLimitExec,
+ maxRecordsPerBatch: Long,
+ maxEstimatedBatchSize: Long,
+ timeZoneId: String): Array[Batch] = {
+ val n = collectLimitExec.limit
+ val schema = collectLimitExec.schema
+ if (n == 0) {
+ return new Array[Batch](0)
+ } else {
+ val limitScaleUpFactor = Math.max(conf.limitScaleUpFactor, 2)
+ // TODO: refactor and reuse the code from RDD's take()
+ val childRDD = collectLimitExec.child.execute()
+ val buf = new ArrayBuffer[Batch]
+ var bufferedRowSize = 0L
+ val totalParts = childRDD.partitions.length
+ var partsScanned = 0
+ while (bufferedRowSize < n && partsScanned < totalParts) {
+ // The number of partitions to try in this iteration. It is ok for this number to be
+ // greater than totalParts because we actually cap it at totalParts in runJob.
+ var numPartsToTry = limitInitialNumPartitions
+ if (partsScanned > 0) {
+ // If we didn't find any rows after the previous iteration, multiply by
+ // limitScaleUpFactor and retry. Otherwise, interpolate the number of partitions we need
+ // to try, but overestimate it by 50%. We also cap the estimation in the end.
+ if (buf.isEmpty) {
+ numPartsToTry = partsScanned * limitScaleUpFactor
+ } else {
+ val left = n - bufferedRowSize
+ // As left > 0, numPartsToTry is always >= 1
+ numPartsToTry = Math.ceil(1.5 * left * partsScanned / bufferedRowSize).toInt
+ numPartsToTry = Math.min(numPartsToTry, partsScanned * limitScaleUpFactor)
+ }
+ }
+
+ val partsToScan =
+ partsScanned.until(math.min(partsScanned + numPartsToTry, totalParts).toInt)
+
+ // TODO: SparkPlan.session introduced in SPARK-35798, replace with SparkPlan.session once we
+ // drop Spark-3.1.x support.
+ val sc = SparkSession.active.sparkContext
+ val res = sc.runJob(
+ childRDD,
+ (it: Iterator[InternalRow]) => {
+ val batches = toBatchIterator(
+ it,
+ schema,
+ maxRecordsPerBatch,
+ maxEstimatedBatchSize,
+ n,
+ timeZoneId)
+ batches.map(b => b -> batches.rowCountInLastBatch).toArray
+ },
+ partsToScan)
+
+ var i = 0
+ while (bufferedRowSize < n && i < res.length) {
+ var j = 0
+ val batches = res(i)
+ while (j < batches.length && n > bufferedRowSize) {
+ val batch = batches(j)
+ val (_, batchSize) = batch
+ buf += batch
+ bufferedRowSize += batchSize
+ j += 1
+ }
+ i += 1
+ }
+ partsScanned += partsToScan.size
+ }
+
+ buf.toArray
+ }
+ }
+
+ /**
+ * Spark introduced the config `spark.sql.limit.initialNumPartitions` in 3.4.0, see SPARK-40211.
+ */
+ private def limitInitialNumPartitions: Int = {
+ conf.getConfString("spark.sql.limit.initialNumPartitions", "1")
+ .toInt
+ }
+
+ /**
+ * Different from [[org.apache.spark.sql.execution.arrow.ArrowConverters.toBatchIterator]],
+ * the returned iterator tracks the row count of each emitted batch.
+ */
+ private def toBatchIterator(
+ rowIter: Iterator[InternalRow],
+ schema: StructType,
+ maxRecordsPerBatch: Long,
+ maxEstimatedBatchSize: Long,
+ limit: Long,
+ timeZoneId: String): ArrowBatchIterator = {
+ new ArrowBatchIterator(
+ rowIter,
+ schema,
+ maxRecordsPerBatch,
+ maxEstimatedBatchSize,
+ limit,
+ timeZoneId,
+ TaskContext.get)
+ }
+
+ /**
+ * This class ArrowBatchIterator is derived from
+ * [[org.apache.spark.sql.execution.arrow.ArrowConverters.ArrowBatchWithSchemaIterator]],
+ * with two key differences:
+ * 1. there is no requirement to write the schema at the batch header
+ * 2. iteration halts when `rowCount` equals `limit`
+ */
+ private[sql] class ArrowBatchIterator(
+ rowIter: Iterator[InternalRow],
+ schema: StructType,
+ maxRecordsPerBatch: Long,
+ maxEstimatedBatchSize: Long,
+ limit: Long,
+ timeZoneId: String,
+ context: TaskContext)
+ extends Iterator[Array[Byte]] {
+
+ protected val arrowSchema = ArrowUtils.toArrowSchema(schema, timeZoneId)
+ private val allocator =
+ ArrowUtils.rootAllocator.newChildAllocator(
+ s"to${this.getClass.getSimpleName}",
+ 0,
+ Long.MaxValue)
+
+ private val root = VectorSchemaRoot.create(arrowSchema, allocator)
+ protected val unloader = new VectorUnloader(root)
+ protected val arrowWriter = ArrowWriter.create(root)
+
+ Option(context).foreach {
+ _.addTaskCompletionListener[Unit] { _ =>
+ root.close()
+ allocator.close()
+ }
+ }
+
+ override def hasNext: Boolean = (rowIter.hasNext && rowCount < limit) || {
+ root.close()
+ allocator.close()
+ false
+ }
+
+ var rowCountInLastBatch: Long = 0
+ var rowCount: Long = 0
+
+ override def next(): Array[Byte] = {
+ val out = new ByteArrayOutputStream()
+ val writeChannel = new WriteChannel(Channels.newChannel(out))
+
+ rowCountInLastBatch = 0
+ var estimatedBatchSize = 0L
+ Utils.tryWithSafeFinally {
+
+ // Always write the first row.
+ while (rowIter.hasNext && (
+ // For maxBatchSize and maxRecordsPerBatch, respect whatever smaller.
+ // If the size in bytes is positive (set properly), always write the first row.
+ rowCountInLastBatch == 0 && maxEstimatedBatchSize > 0 ||
+ // If the size in bytes of rows are 0 or negative, unlimit it.
+ estimatedBatchSize <= 0 ||
+ estimatedBatchSize < maxEstimatedBatchSize ||
+ // If the size of rows are 0 or negative, unlimit it.
+ maxRecordsPerBatch <= 0 ||
+ rowCountInLastBatch < maxRecordsPerBatch ||
+ rowCount < limit)) {
+ val row = rowIter.next()
+ arrowWriter.write(row)
+ estimatedBatchSize += (row match {
+ case ur: UnsafeRow => ur.getSizeInBytes
+ // Trying to estimate the size of the current row
+ case _: InternalRow => schema.defaultSize
+ })
+ rowCountInLastBatch += 1
+ rowCount += 1
+ }
+ arrowWriter.finish()
+ val batch = unloader.getRecordBatch()
+ MessageSerializer.serialize(writeChannel, batch)
+
+ // Always write the Ipc options at the end.
+ ArrowStreamWriter.writeEndOfStream(writeChannel, IpcOption.DEFAULT)
+
+ batch.close()
+ } {
+ arrowWriter.reset()
+ }
+
+ out.toByteArray
+ }
+ }
+
+ // for testing
+ def fromBatchIterator(
+ arrowBatchIter: Iterator[Array[Byte]],
+ schema: StructType,
+ timeZoneId: String,
+ context: TaskContext): Iterator[InternalRow] = {
+ ArrowConverters.fromBatchIterator(arrowBatchIter, schema, timeZoneId, context)
+ }
+}
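The limit-aware iteration above can be distilled into this plain-Scala sketch (not the Kyuubi class itself), showing the hasNext-side cutoff:

```scala
// distilled sketch of the limit-aware iterator idea: stop producing elements
// once `limit` items have been emitted, even if the source has more
class LimitedIterator[T](underlying: Iterator[T], limit: Long) extends Iterator[T] {
  private var emitted = 0L
  override def hasNext: Boolean = emitted < limit && underlying.hasNext
  override def next(): T = {
    if (!hasNext) throw new NoSuchElementException("limit reached")
    emitted += 1
    underlying.next()
  }
}

// e.g. new LimitedIterator(Iterator.from(1), 3).toList == List(1, 2, 3)
```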
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala
index 1a542937338..1c8d32c4850 100644
--- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/kyuubi/SparkDatasetHelper.scala
@@ -17,18 +17,75 @@
package org.apache.spark.sql.kyuubi
+import scala.collection.mutable.ArrayBuffer
+
+import org.apache.spark.TaskContext
+import org.apache.spark.internal.Logging
+import org.apache.spark.network.util.{ByteUnit, JavaUtils}
import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.{DataFrame, Dataset, Row}
+import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
+import org.apache.spark.sql.execution.{CollectLimitExec, SparkPlan, SQLExecution}
+import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec
+import org.apache.spark.sql.execution.arrow.{ArrowConverters, KyuubiArrowConverters}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
+import org.apache.kyuubi.engine.spark.KyuubiSparkUtil
import org.apache.kyuubi.engine.spark.schema.RowSet
+import org.apache.kyuubi.reflection.DynMethods
+
+object SparkDatasetHelper extends Logging {
+
+ def executeCollect(df: DataFrame): Array[Array[Byte]] = withNewExecutionId(df) {
+ executeArrowBatchCollect(df.queryExecution.executedPlan)
+ }
+
+ def executeArrowBatchCollect: SparkPlan => Array[Array[Byte]] = {
+ case adaptiveSparkPlan: AdaptiveSparkPlanExec =>
+ executeArrowBatchCollect(finalPhysicalPlan(adaptiveSparkPlan))
+ // TODO: avoid extra shuffle if `offset` > 0
+ case collectLimit: CollectLimitExec if offset(collectLimit) > 0 =>
+ logWarning("unsupported offset > 0, an extra shuffle will be introduced.")
+ toArrowBatchRdd(collectLimit).collect()
+ case collectLimit: CollectLimitExec if collectLimit.limit >= 0 =>
+ doCollectLimit(collectLimit)
+ case collectLimit: CollectLimitExec if collectLimit.limit < 0 =>
+ executeArrowBatchCollect(collectLimit.child)
+ case plan: SparkPlan =>
+ toArrowBatchRdd(plan).collect()
+ }
-object SparkDatasetHelper {
def toArrowBatchRdd[T](ds: Dataset[T]): RDD[Array[Byte]] = {
ds.toArrowBatchRdd
}
+ /**
+ * Forked from [[Dataset.toArrowBatchRdd(plan: SparkPlan)]].
+ * Convert to an RDD of serialized ArrowRecordBatches.
+ */
+ def toArrowBatchRdd(plan: SparkPlan): RDD[Array[Byte]] = {
+ val schemaCaptured = plan.schema
+ // TODO: SparkPlan.session introduced in SPARK-35798, replace with SparkPlan.session once we
+ // drop Spark-3.1.x support.
+ val maxRecordsPerBatch = SparkSession.active.sessionState.conf.arrowMaxRecordsPerBatch
+ val timeZoneId = SparkSession.active.sessionState.conf.sessionLocalTimeZone
+ plan.execute().mapPartitionsInternal { iter =>
+ val context = TaskContext.get()
+ ArrowConverters.toBatchIterator(
+ iter,
+ schemaCaptured,
+ maxRecordsPerBatch,
+ timeZoneId,
+ context)
+ }
+ }
+
+ def toArrowBatchLocalIterator(df: DataFrame): Iterator[Array[Byte]] = {
+ withNewExecutionId(df) {
+ toArrowBatchRdd(df).toLocalIterator
+ }
+ }
+
def convertTopLevelComplexTypeToHiveString(
df: DataFrame,
timestampAsString: Boolean): DataFrame = {
@@ -68,11 +125,108 @@ object SparkDatasetHelper {
* Fork from Apache Spark-3.3.1 org.apache.spark.sql.catalyst.util.quoteIfNeeded to adapt to
* Spark-3.1.x
*/
- def quoteIfNeeded(part: String): String = {
+ private def quoteIfNeeded(part: String): String = {
if (part.matches("[a-zA-Z0-9_]+") && !part.matches("\\d+")) {
part
} else {
s"`${part.replace("`", "``")}`"
}
}
+
+ private lazy val maxBatchSize: Long = {
+ // respect spark connect config
+ KyuubiSparkUtil.globalSparkContext
+ .getConf
+ .getOption("spark.connect.grpc.arrow.maxBatchSize")
+ .orElse(Option("4m"))
+ .map(JavaUtils.byteStringAs(_, ByteUnit.MiB))
+ .get
+ }
+
+ private def doCollectLimit(collectLimit: CollectLimitExec): Array[Array[Byte]] = {
+ // TODO: SparkPlan.session introduced in SPARK-35798, replace with SparkPlan.session once we
+ // drop Spark-3.1.x support.
+ val timeZoneId = SparkSession.active.sessionState.conf.sessionLocalTimeZone
+ val maxRecordsPerBatch = SparkSession.active.sessionState.conf.arrowMaxRecordsPerBatch
+
+ val batches = KyuubiArrowConverters.takeAsArrowBatches(
+ collectLimit,
+ maxRecordsPerBatch,
+ maxBatchSize,
+ timeZoneId)
+
+ // note that the number of rows in the returned arrow batches may be >= `limit`, so
+ // perform a slicing operation on the result
+ val result = ArrayBuffer[Array[Byte]]()
+ var i = 0
+ var rest = collectLimit.limit
+ while (i < batches.length && rest > 0) {
+ val (batch, size) = batches(i)
+ if (size <= rest) {
+ result += batch
+ // this batch's row count fits within the remaining limit, so keep it whole
+ rest -= size.toInt
+ } else { // size > rest
+ result += KyuubiArrowConverters.slice(collectLimit.schema, timeZoneId, batch, 0, rest)
+ rest = 0
+ }
+ i += 1
+ }
+ result.toArray
+ }
+
+ /**
+ * This method provides a reflection-based implementation of
+ * [[AdaptiveSparkPlanExec.finalPhysicalPlan]] that enables us to adapt to the Spark runtime
+ * without patching SPARK-41914.
+ *
+ * TODO: Once we drop support for Spark 3.1.x, we can directly call
+ * [[AdaptiveSparkPlanExec.finalPhysicalPlan]].
+ */
+ def finalPhysicalPlan(adaptiveSparkPlanExec: AdaptiveSparkPlanExec): SparkPlan = {
+ withFinalPlanUpdate(adaptiveSparkPlanExec, identity)
+ }
+
+ /**
+ * A reflection-based implementation of [[AdaptiveSparkPlanExec.withFinalPlanUpdate]].
+ */
+ private def withFinalPlanUpdate[T](
+ adaptiveSparkPlanExec: AdaptiveSparkPlanExec,
+ fun: SparkPlan => T): T = {
+ val getFinalPhysicalPlan = DynMethods.builder("getFinalPhysicalPlan")
+ .hiddenImpl(adaptiveSparkPlanExec.getClass)
+ .build()
+ val plan = getFinalPhysicalPlan.invoke[SparkPlan](adaptiveSparkPlanExec)
+ val result = fun(plan)
+ val finalPlanUpdate = DynMethods.builder("finalPlanUpdate")
+ .hiddenImpl(adaptiveSparkPlanExec.getClass)
+ .build()
+ finalPlanUpdate.invoke[Unit](adaptiveSparkPlanExec)
+ result
+ }
+
+ /**
+ * Offset support was added in Spark 3.4 (see SPARK-28330). To ensure backward compatibility
+ * with earlier versions of Spark, this function uses reflection to call "offset".
+ */
+ private def offset(collectLimitExec: CollectLimitExec): Int = {
+ Option(
+ DynMethods.builder("offset")
+ .impl(collectLimitExec.getClass)
+ .orNoop()
+ .build()
+ .invoke[Int](collectLimitExec))
+ .getOrElse(0)
+ }
+
+ /**
+ * Refer to org.apache.spark.sql.Dataset#withAction(): assign a new execution id for the
+ * arrow-based operation, so that we can track arrow-based queries on the UI tab.
+ */
+ private def withNewExecutionId[T](df: DataFrame)(body: => T): T = {
+ SQLExecution.withNewExecutionId(df.queryExecution, Some("collectAsArrow")) {
+ df.queryExecution.executedPlan.resetMetrics()
+ body
+ }
+ }
}
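As a toy rendition of the slicing step in `doCollectLimit` (the batch payloads and row counts are invented): whole batches are kept while they fit, and the first batch that overshoots is sliced so the total equals the limit.

```scala
// batches arrive as (payload, rowCount) pairs, mirroring KyuubiArrowConverters.Batch
val batches = Seq(("b0", 8L), ("b1", 8L), ("b2", 8L))
var rest = 20
val result = scala.collection.mutable.ArrayBuffer[String]()
var i = 0
while (i < batches.length && rest > 0) {
  val (payload, size) = batches(i)
  if (size <= rest) {
    result += payload
    rest -= size.toInt
  } else {
    result += s"slice($payload, 0, $rest)" // stands in for KyuubiArrowConverters.slice
    rest = 0
  }
  i += 1
}
assert(result == Seq("b0", "b1", "slice(b2, 0, 4)"))
```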
diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkArrowbasedOperationSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkArrowbasedOperationSuite.scala
index ae6237bb59c..2ef29b398a3 100644
--- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkArrowbasedOperationSuite.scala
+++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/operation/SparkArrowbasedOperationSuite.scala
@@ -18,16 +18,28 @@
package org.apache.kyuubi.engine.spark.operation
import java.sql.Statement
+import java.util.{Set => JSet}
import org.apache.spark.KyuubiSparkContextHelper
+import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
+import org.apache.spark.sql.{QueryTest, Row, SparkSession}
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project}
-import org.apache.spark.sql.execution.QueryExecution
+import org.apache.spark.sql.execution.{CollectLimitExec, QueryExecution, SparkPlan}
+import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec
+import org.apache.spark.sql.execution.arrow.KyuubiArrowConverters
+import org.apache.spark.sql.execution.exchange.Exchange
+import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec}
+import org.apache.spark.sql.functions.col
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.kyuubi.SparkDatasetHelper
import org.apache.spark.sql.util.QueryExecutionListener
+import org.apache.kyuubi.KyuubiException
import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.engine.spark.{SparkSQLEngine, WithSparkSQLEngine}
import org.apache.kyuubi.engine.spark.session.SparkSessionImpl
import org.apache.kyuubi.operation.SparkDataTypeTests
+import org.apache.kyuubi.reflection.DynFields
class SparkArrowbasedOperationSuite extends WithSparkSQLEngine with SparkDataTypeTests {
@@ -138,6 +150,155 @@ class SparkArrowbasedOperationSuite extends WithSparkSQLEngine with SparkDataTyp
assert(metrics("numOutputRows").value === 1)
}
+ test("SparkDatasetHelper.executeArrowBatchCollect should return expect row count") {
+ val returnSize = Seq(
+ 0, // the spark optimizer guarantees `limit != 0`; this is just a sanity check
+ 7, // less than one partition
+ 10, // equal to one partition
+ 13, // between one and two partitions, run two jobs
+ 20, // equal to two partitions
+ 29, // between two and three partitions
+ 1000, // all partitions
+ 1001) // more than total row count
+
+ def runAndCheck(sparkPlan: SparkPlan, expectSize: Int): Unit = {
+ val arrowBinary = SparkDatasetHelper.executeArrowBatchCollect(sparkPlan)
+ val rows = KyuubiArrowConverters.fromBatchIterator(
+ arrowBinary.iterator,
+ sparkPlan.schema,
+ "",
+ KyuubiSparkContextHelper.dummyTaskContext())
+ assert(rows.size == expectSize)
+ }
+
+ val excludedRules = Seq(
+ "org.apache.spark.sql.catalyst.optimizer.EliminateLimits",
+ "org.apache.spark.sql.catalyst.optimizer.OptimizeLimitZero",
+ "org.apache.spark.sql.execution.adaptive.AQEPropagateEmptyRelation").mkString(",")
+ withSQLConf(
+ SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> excludedRules,
+ SQLConf.ADAPTIVE_OPTIMIZER_EXCLUDED_RULES.key -> excludedRules) {
+ // aqe
+ // outermost AdaptiveSparkPlanExec
+ spark.range(1000)
+ .repartitionByRange(100, col("id"))
+ .createOrReplaceTempView("t_1")
+ spark.sql("select * from t_1")
+ .foreachPartition { p: Iterator[Row] =>
+ assert(p.length == 10)
+ ()
+ }
+ returnSize.foreach { size =>
+ val df = spark.sql(s"select * from t_1 limit $size")
+ val headPlan = df.queryExecution.executedPlan.collectLeaves().head
+ if (SPARK_ENGINE_RUNTIME_VERSION >= "3.2") {
+ assert(headPlan.isInstanceOf[AdaptiveSparkPlanExec])
+ val finalPhysicalPlan =
+ SparkDatasetHelper.finalPhysicalPlan(headPlan.asInstanceOf[AdaptiveSparkPlanExec])
+ assert(finalPhysicalPlan.isInstanceOf[CollectLimitExec])
+ }
+ if (size > 1000) {
+ runAndCheck(df.queryExecution.executedPlan, 1000)
+ } else {
+ runAndCheck(df.queryExecution.executedPlan, size)
+ }
+ }
+
+ // outermost CollectLimitExec
+ spark.range(0, 1000, 1, numPartitions = 100)
+ .createOrReplaceTempView("t_2")
+ spark.sql("select * from t_2")
+ .foreachPartition { p: Iterator[Row] =>
+ assert(p.length == 10)
+ ()
+ }
+ returnSize.foreach { size =>
+ val df = spark.sql(s"select * from t_2 limit $size")
+ val plan = df.queryExecution.executedPlan
+ assert(plan.isInstanceOf[CollectLimitExec])
+ if (size > 1000) {
+ runAndCheck(df.queryExecution.executedPlan, 1000)
+ } else {
+ runAndCheck(df.queryExecution.executedPlan, size)
+ }
+ }
+ }
+ }
+
+ test("aqe should work properly") {
+
+ val s = spark
+ import s.implicits._
+
+ spark.sparkContext.parallelize(
+ (1 to 100).map(i => TestData(i, i.toString))).toDF()
+ .createOrReplaceTempView("testData")
+ spark.sparkContext.parallelize(
+ TestData2(1, 1) ::
+ TestData2(1, 2) ::
+ TestData2(2, 1) ::
+ TestData2(2, 2) ::
+ TestData2(3, 1) ::
+ TestData2(3, 2) :: Nil,
+ 2).toDF()
+ .createOrReplaceTempView("testData2")
+
+ withSQLConf(
+ SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
+ SQLConf.SHUFFLE_PARTITIONS.key -> "5",
+ SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
+ val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
+ """
+ |SELECT * FROM(
+ | SELECT * FROM testData join testData2 ON key = a where value = '1'
+ |) LIMIT 1
+ |""".stripMargin)
+ val smj = plan.collect { case smj: SortMergeJoinExec => smj }
+ val bhj = adaptivePlan.collect { case bhj: BroadcastHashJoinExec => bhj }
+ assert(smj.size == 1)
+ assert(bhj.size == 1)
+ }
+ }
+
+ test("result offset support") {
+ assume(SPARK_ENGINE_RUNTIME_VERSION > "3.3")
+ var numStages = 0
+ val listener = new SparkListener {
+ override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
+ numStages = jobStart.stageInfos.length
+ }
+ }
+ withJdbcStatement() { statement =>
+ withSparkListener(listener) {
+ withPartitionedTable("t_3") {
+ statement.executeQuery("select * from t_3 limit 10 offset 10")
+ }
+ KyuubiSparkContextHelper.waitListenerBus(spark)
+ }
+ }
+ // an extra shuffle is introduced if `offset` > 0
+ assert(numStages == 2)
+ }
+
+ test("arrow serialization should not introduce extra shuffle for outermost limit") {
+ var numStages = 0
+ val listener = new SparkListener {
+ override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
+ numStages = jobStart.stageInfos.length
+ }
+ }
+ withJdbcStatement() { statement =>
+ withSparkListener(listener) {
+ withPartitionedTable("t_3") {
+ statement.executeQuery("select * from t_3 limit 1000")
+ }
+ KyuubiSparkContextHelper.waitListenerBus(spark)
+ }
+ }
+ // Should be only one stage since there is no shuffle.
+ assert(numStages == 1)
+ }
+
private def checkResultSetFormat(statement: Statement, expectFormat: String): Unit = {
val query =
s"""
@@ -177,4 +338,101 @@ class SparkArrowbasedOperationSuite extends WithSparkSQLEngine with SparkDataTyp
.allSessions()
.foreach(_.asInstanceOf[SparkSessionImpl].spark.listenerManager.unregister(listener))
}
+
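+ /** Registers `listener` with the SparkContext of every active engine session for the duration of `body`. */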
+ private def withSparkListener[T](listener: SparkListener)(body: => T): T = {
+ withAllSessions(s => s.sparkContext.addSparkListener(listener))
+ try {
+ body
+ } finally {
+ withAllSessions(s => s.sparkContext.removeSparkListener(listener))
+ }
+ }
+
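+ /** Creates a 1000-row, 100-partition temp view `viewName` in every active session, runs `body`, then drops the view. */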
+ private def withPartitionedTable[T](viewName: String)(body: => T): T = {
+ withAllSessions { spark =>
+ spark.range(0, 1000, 1, numPartitions = 100)
+ .createOrReplaceTempView(viewName)
+ }
+ try {
+ body
+ } finally {
+ withAllSessions { spark =>
+ spark.sql(s"DROP VIEW IF EXISTS $viewName")
+ }
+ }
+ }
+
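+ /** Applies `op` to the underlying SparkSession of every active engine session. */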
+ private def withAllSessions(op: SparkSession => Unit): Unit = {
+ SparkSQLEngine.currentEngine.get
+ .backendService
+ .sessionManager
+ .allSessions()
+ .map(_.asInstanceOf[SparkSessionImpl].spark)
+ .foreach(op(_))
+ }
+
+ private def runAdaptiveAndVerifyResult(query: String): (SparkPlan, SparkPlan) = {
+ val dfAdaptive = spark.sql(query)
+ val planBefore = dfAdaptive.queryExecution.executedPlan
+ val result = dfAdaptive.collect()
+ withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") {
+ val df = spark.sql(query)
+ // compare the non-AQE run against the AQE result collected above
+ QueryTest.checkAnswer(df, result.toSeq)
+ }
+ val planAfter = dfAdaptive.queryExecution.executedPlan
+ val adaptivePlan = planAfter.asInstanceOf[AdaptiveSparkPlanExec].executedPlan
+ val exchanges = adaptivePlan.collect {
+ case e: Exchange => e
+ }
+ assert(exchanges.isEmpty, "The final plan should not contain any Exchange node.")
+ (dfAdaptive.queryExecution.sparkPlan, adaptivePlan)
+ }
+
+ /**
+ * Sets all SQL configurations specified in `pairs`, calls `f`, and then restores all SQL
+ * configurations.
+ */
+ protected def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = {
+ val conf = SQLConf.get
+ val (keys, values) = pairs.unzip
+ val currentValues = keys.map { key =>
+ if (conf.contains(key)) {
+ Some(conf.getConfString(key))
+ } else {
+ None
+ }
+ }
+ (keys, values).zipped.foreach { (k, v) =>
+ if (isStaticConfigKey(k)) {
+ throw new KyuubiException(s"Cannot modify the value of a static config: $k")
+ }
+ conf.setConfString(k, v)
+ }
+ try f
+ finally {
+ keys.zip(currentValues).foreach {
+ case (key, Some(value)) => conf.setConfString(key, value)
+ case (key, None) => conf.unsetConf(key)
+ }
+ }
+ }
+
+ /**
+ * This method provides a reflection-based implementation of [[SQLConf.isStaticConfigKey]] to
+ * adapt to Spark 3.1.x.
+ *
+ * TODO: Once we drop support for Spark 3.1.x, we can directly call
+ * [[SQLConf.isStaticConfigKey()]].
+ */
+ private def isStaticConfigKey(key: String): Boolean = {
+ val staticConfKeys = DynFields.builder()
+ .hiddenImpl(SQLConf.getClass, "staticConfKeys")
+ .build[JSet[String]](SQLConf)
+ .get()
+ staticConfKeys.contains(key)
+ }
}
+
+case class TestData(key: Int, value: String)
+case class TestData2(a: Int, b: Int)
diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/KyuubiSparkContextHelper.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/KyuubiSparkContextHelper.scala
index 8293123ead7..1b662eadf96 100644
--- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/KyuubiSparkContextHelper.scala
+++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/spark/KyuubiSparkContextHelper.scala
@@ -27,4 +27,6 @@ object KyuubiSparkContextHelper {
def waitListenerBus(spark: SparkSession): Unit = {
spark.sparkContext.listenerBus.waitUntilEmpty()
}
+
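+ /** An empty [[TaskContextImpl]] for tests that run task-side code (e.g. Arrow deserialization) on the driver. */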
+ def dummyTaskContext(): TaskContextImpl = TaskContext.empty()
}
diff --git a/pom.xml b/pom.xml
index 09ee14c08b4..1fba6edeaa0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -538,8 +538,8 @@
hadoop-client
From a834ed3efb19c94035b38e7f03a442d3ce9b5423 Mon Sep 17 00:00:00 2001
From: huangzhir <306824224@qq.com>
Date: Mon, 10 Apr 2023 10:26:28 +0800
Subject: [PATCH 018/404] [KYUUBI #4530] [AUTHZ] Support non-English chars for
MASK, MASK_SHOW_FIRST_4, and MASK_SHOW_LAST_4
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
### _Why are the changes needed?_
To fix https://github.com/apache/kyuubi/issues/4530.
1. Issue https://github.com/apache/kyuubi/issues/4530 occurs because the MASK_SHOW_FIRST_4 and MASK_SHOW_LAST_4 mask types are implemented with the regexp_replace method, which only replaces English letters and digits and ignores characters of other languages, such as Chinese.
2. To fix this, I modified the regexp_replace chain to also replace non-English characters with the letter 'U', so they are masked properly as well (see the sketch below).
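A minimal sketch of how the nested `regexp_replace` expression is built after this patch (assuming, as in `SparkRangerAdminPlugin`, that `expr` is the masked column expression and `pos` an optional trailing argument):

```scala
// Sketch of the MASK-style expression chain; not the verbatim plugin code.
def maskExpr(expr: String, pos: String = ""): String = {
  val upper = s"regexp_replace($expr, '[A-Z]', 'X'$pos)"   // upper-case letters -> 'X'
  val lower = s"regexp_replace($upper, '[a-z]', 'x'$pos)"  // lower-case letters -> 'x'
  val digits = s"regexp_replace($lower, '[0-9]', 'n'$pos)" // digits -> 'n'
  // New in this patch: every remaining character (e.g. CJK) -> 'U'
  s"regexp_replace($digits, '[^A-Za-z0-9]', 'U'$pos)"
}
```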
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4643 from huangzhir/fixbug-datamask.
Closes #4530
abe45b278 [huangzhir] fix nit
f74e582ed [huangzhir] Move the data preparation to setup,some tests were modified due to changes in the data.
fb3f89e15 [huangzhir] 1. Modified test methods to perform end-to-end testing. 2. Mask data should not ignore spaces.
bb6406c81 [huangzhir] Rollback unnecessary changes, add tests using SQL queries, and modify the Scala style checking code.
7754d74fd [huangzhir] Switching the plan.Replace all characters except English letters and numbers with a single character 'U'.Preserve the " " character.
a905817a0 [huangzhir] fix
ce23bcd1b [huangzhir] Regression testing is to keep the original tests unchanged, and only add the "regexp_replace" test method.
a39f185dd [huangzhir] 1. Use a ‘密’ replacer for Chinese chars 2. Use separate UT cases for testing this regexp_replace method.
94b05db89 [huangzhir] [KYUUBI #4530] [AUTHZ] fix bug: support MASK_SHOW_FIRST_4 and MASK_SHOW_LAST_4 Chinese data mask
0fc1065ca [huangzhir] fix bug: support MASK_SHOW_FIRST_4 and MASK_SHOW_LAST_4 Chinese data mask
Authored-by: huangzhir <306824224@qq.com>
Signed-off-by: Kent Yao
---
.../authz/ranger/SparkRangerAdminPlugin.scala | 3 +-
.../ranger/SparkRangerAdminPluginSuite.scala | 9 +-
.../datamasking/DataMaskingTestBase.scala | 103 ++++++++++++++----
3 files changed, 92 insertions(+), 23 deletions(-)
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPlugin.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPlugin.scala
index 78e59ff897f..8332b27f028 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPlugin.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPlugin.scala
@@ -136,7 +136,8 @@ object SparkRangerAdminPlugin extends RangerBasePlugin("spark", "sparkSql")
val upper = s"regexp_replace($expr, '[A-Z]', 'X'$pos)"
val lower = s"regexp_replace($upper, '[a-z]', 'x'$pos)"
val digits = s"regexp_replace($lower, '[0-9]', 'n'$pos)"
- digits
+ val other = s"regexp_replace($digits, '[^A-Za-z0-9]', 'U'$pos)"
+ other
}
/**
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala
index 8711a728726..3338a331450 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala
@@ -50,11 +50,14 @@ class SparkRangerAdminPluginSuite extends AnyFunSuite {
}
assert(getMaskingExpr(buildAccessRequest(bob, "value1")).get === "md5(cast(value1 as string))")
assert(getMaskingExpr(buildAccessRequest(bob, "value2")).get ===
- "regexp_replace(regexp_replace(regexp_replace(value2, '[A-Z]', 'X'), '[a-z]', 'x')," +
- " '[0-9]', 'n')")
+ "regexp_replace(regexp_replace(regexp_replace(regexp_replace(value2, '[A-Z]', 'X')," +
+ " '[a-z]', 'x'), '[0-9]', 'n'), '[^A-Za-z0-9]', 'U')")
assert(getMaskingExpr(buildAccessRequest(bob, "value3")).get contains "regexp_replace")
assert(getMaskingExpr(buildAccessRequest(bob, "value4")).get === "date_trunc('YEAR', value4)")
- assert(getMaskingExpr(buildAccessRequest(bob, "value5")).get contains "regexp_replace")
+ assert(getMaskingExpr(buildAccessRequest(bob, "value5")).get ===
+ "concat(regexp_replace(regexp_replace(regexp_replace(regexp_replace(" +
+ "left(value5, length(value5) - 4), '[A-Z]', 'X'), '[a-z]', 'x')," +
+ " '[0-9]', 'n'), '[^A-Za-z0-9]', 'U'), right(value5, 4))")
Seq("admin", "alice").foreach { user =>
val ugi = UserGroupInformation.createRemoteUser(user)
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala
index 3585397c6fa..29a70931152 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala
@@ -55,6 +55,17 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
"SELECT 20, 2, 'kyuubi', 'y', timestamp'2018-11-17 12:34:56', 'world'")
sql("INSERT INTO default.src " +
"SELECT 30, 3, 'spark', 'a', timestamp'2018-11-17 12:34:56', 'world'")
+
+ // scalastyle:off
+ val value1 = "hello WORD 123 ~!@# AßþΔЙקم๗ቐあア叶葉엽"
+ val value2 = "AßþΔЙקم๗ቐあア叶葉엽 hello WORD 123 ~!@#"
+ // AßþΔЙקم๗ቐあア叶葉엽: see https://zh.wikipedia.org/zh-cn/Unicode#XML.E5.92.8CUnicode
+ // scalastyle:on
+ sql(s"INSERT INTO default.src " +
+ s"SELECT 10, 4, '$value1', '$value1', timestamp'2018-11-17 12:34:56', '$value1'")
+ sql("INSERT INTO default.src " +
+ s"SELECT 11, 5, '$value2', '$value2', timestamp'2018-11-17 12:34:56', '$value2'")
+
sql(s"CREATE TABLE default.unmasked $format AS SELECT * FROM default.src")
}
@@ -74,23 +85,30 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
}
test("simple query with a user doesn't have mask rules") {
- checkAnswer("kent", "SELECT key FROM default.src order by key", Seq(Row(1), Row(20), Row(30)))
+ checkAnswer(
+ "kent",
+ "SELECT key FROM default.src order by key",
+ Seq(Row(1), Row(10), Row(11), Row(20), Row(30)))
}
test("simple query with a user has mask rules") {
val result =
Seq(Row(md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld"))
- checkAnswer("bob", "SELECT value1, value2, value3, value4, value5 FROM default.src", result)
checkAnswer(
"bob",
- "SELECT value1 as key, value2, value3, value4, value5 FROM default.src",
+ "SELECT value1, value2, value3, value4, value5 FROM default.src " +
+ "where key = 1",
+ result)
+ checkAnswer(
+ "bob",
+ "SELECT value1 as key, value2, value3, value4, value5 FROM default.src where key = 1",
result)
}
test("star") {
val result =
Seq(Row(1, md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld"))
- checkAnswer("bob", "SELECT * FROM default.src", result)
+ checkAnswer("bob", "SELECT * FROM default.src where key = 1", result)
}
test("simple udf") {
@@ -98,7 +116,8 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
Seq(Row(md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld"))
checkAnswer(
"bob",
- "SELECT max(value1), max(value2), max(value3), max(value4), max(value5) FROM default.src",
+ "SELECT max(value1), max(value2), max(value3), max(value4), max(value5) FROM default.src" +
+ " where key = 1",
result)
}
@@ -109,7 +128,7 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
"bob",
"SELECT coalesce(max(value1), 1), coalesce(max(value2), 1), coalesce(max(value3), 1), " +
"coalesce(max(value4), timestamp '2018-01-01 22:33:44'), coalesce(max(value5), 1) " +
- "FROM default.src",
+ "FROM default.src where key = 1",
result)
}
@@ -119,13 +138,16 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
checkAnswer(
"bob",
"SELECT value1, value2, value3, value4, value5 FROM default.src WHERE value2 in " +
- "(SELECT value2 as key FROM default.src)",
+ "(SELECT value2 as key FROM default.src where key = 1)",
result)
}
test("create a unmasked table as select from a masked one") {
withCleanTmpResources(Seq(("default.src2", "table"))) {
- doAs("bob", sql(s"CREATE TABLE default.src2 $format AS SELECT value1 FROM default.src"))
+ doAs(
+ "bob",
+ sql(s"CREATE TABLE default.src2 $format AS SELECT value1 FROM default.src " +
+ s"where key = 1"))
checkAnswer("bob", "SELECT value1 FROM default.src2", Seq(Row(md5Hex("1"))))
}
}
@@ -133,12 +155,24 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
test("insert into a unmasked table from a masked one") {
withCleanTmpResources(Seq(("default.src2", "table"), ("default.src3", "table"))) {
doAs("bob", sql(s"CREATE TABLE default.src2 (value1 string) $format"))
- doAs("bob", sql(s"INSERT INTO default.src2 SELECT value1 from default.src"))
- doAs("bob", sql(s"INSERT INTO default.src2 SELECT value1 as v from default.src"))
+ doAs(
+ "bob",
+ sql(s"INSERT INTO default.src2 SELECT value1 from default.src " +
+ s"where key = 1"))
+ doAs(
+ "bob",
+ sql(s"INSERT INTO default.src2 SELECT value1 as v from default.src " +
+ s"where key = 1"))
checkAnswer("bob", "SELECT value1 FROM default.src2", Seq(Row(md5Hex("1")), Row(md5Hex("1"))))
doAs("bob", sql(s"CREATE TABLE default.src3 (k int, value string) $format"))
- doAs("bob", sql(s"INSERT INTO default.src3 SELECT key, value1 from default.src"))
- doAs("bob", sql(s"INSERT INTO default.src3 SELECT key, value1 as v from default.src"))
+ doAs(
+ "bob",
+ sql(s"INSERT INTO default.src3 SELECT key, value1 from default.src " +
+ s"where key = 1"))
+ doAs(
+ "bob",
+ sql(s"INSERT INTO default.src3 SELECT key, value1 as v from default.src " +
+ s"where key = 1"))
checkAnswer("bob", "SELECT value FROM default.src3", Seq(Row(md5Hex("1")), Row(md5Hex("1"))))
}
}
@@ -152,7 +186,7 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
test("self join on a masked table") {
val s = "SELECT a.value1, b.value1 FROM default.src a" +
- " join default.src b on a.value1=b.value1"
+ " join default.src b on a.value1=b.value1 where a.key = 1 and b.key = 1 "
checkAnswer("bob", s, Seq(Row(md5Hex("1"), md5Hex("1"))))
// just for testing query multiple times, don't delete it
checkAnswer("bob", s, Seq(Row(md5Hex("1"), md5Hex("1"))))
@@ -228,17 +262,18 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
test("union an unmasked table") {
val s = """
SELECT value1 from (
- SELECT a.value1 FROM default.src a
+ SELECT a.value1 FROM default.src a where a.key = 1
union
(SELECT b.value1 FROM default.unmasked b)
) c order by value1
"""
- checkAnswer("bob", s, Seq(Row("1"), Row("2"), Row("3"), Row(md5Hex("1"))))
+ doAs("bob", sql(s).show)
+ checkAnswer("bob", s, Seq(Row("1"), Row("2"), Row("3"), Row("4"), Row("5"), Row(md5Hex("1"))))
}
test("union a masked table") {
- val s = "SELECT a.value1 FROM default.src a union" +
- " (SELECT b.value1 FROM default.src b)"
+ val s = "SELECT a.value1 FROM default.src a where a.key = 1 union" +
+ " (SELECT b.value1 FROM default.src b where b.key = 1)"
checkAnswer("bob", s, Seq(Row(md5Hex("1"))))
}
@@ -252,12 +287,42 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
withCleanTmpResources(Seq(("default.perm_view", "view"))) {
checkAnswer(
"perm_view_user",
- "SELECT value1, value2 FROM default.src where key < 20",
+ "SELECT value1, value2 FROM default.src where key = 1",
Seq(Row(1, "hello")))
checkAnswer(
"perm_view_user",
- "SELECT value1, value2 FROM default.perm_view where key < 20",
+ "SELECT value1, value2 FROM default.perm_view where key = 1",
Seq(Row(md5Hex("1"), "hello")))
}
}
+
+ // This test only includes a small subset of UCS-2 characters,
+ // but in theory it should work for all characters.
+ test("test MASK,MASK_SHOW_FIRST_4,MASK_SHOW_LAST_4 rule with non-English character set") {
+ val s1 = s"SELECT * FROM default.src where key = 10"
+ val s2 = s"SELECT * FROM default.src where key = 11"
+ // scalastyle:off
+ checkAnswer(
+ "bob",
+ s1,
+ Seq(Row(
+ 10,
+ md5Hex("4"),
+ "xxxxxUXXXXUnnnUUUUUUXUUUUUUUUUUUUU",
+ "hellxUXXXXUnnnUUUUUUXUUUUUUUUUUUUU",
+ Timestamp.valueOf("2018-01-01 00:00:00"),
+ "xxxxxUXXXXUnnnUUUUUUXUUUUUUUUUア叶葉엽")))
+ checkAnswer(
+ "bob",
+ s2,
+ Seq(Row(
+ 11,
+ md5Hex("5"),
+ "XUUUUUUUUUUUUUUxxxxxUXXXXUnnnUUUUU",
+ "AßþΔUUUUUUUUUUUxxxxxUXXXXUnnnUUUUU",
+ Timestamp.valueOf("2018-01-01 00:00:00"),
+ "XUUUUUUUUUUUUUUxxxxxUXXXXUnnnU~!@#")))
+ // scalastyle:on
+ }
+
}
From 91a2ab3665f44ade8aa768a9bf125bcd8a71478f Mon Sep 17 00:00:00 2001
From: ulysses-you
Date: Mon, 10 Apr 2023 11:41:37 +0800
Subject: [PATCH 019/404] [KYUUBI #4678] Improve FinalStageResourceManager kill
executors
### _Why are the changes needed?_
This PR changes two things:
1. Add a config to skip killing executors if the plan contains table caches. It's not always safe to kill executors when the cache is referenced by two write-like plans.
2. Force `adjustTargetNumExecutors` when killing executors. `YarnAllocator` might re-request the original target executors if DRA has not updated the target executors yet. Note that DRA would re-adjust executors if there are more tasks to be executed, so we are safe. It's better to adjust the target number of executors once we kill executors; see the sketch below.
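A minimal sketch of the adjusted kill call (not the verbatim `FinalStageResourceManager` code; it assumes `executorAllocationClient` and `executorsToKill` are already in scope, as they are inside the rule):

```scala
// Lower the DRA target while killing, so YarnAllocator does not
// immediately re-request the executors that were just released.
executorAllocationClient.killExecutors(
  executorIds = executorsToKill,
  adjustTargetNumExecutors = true, // previously false
  countFailures = false,
  force = false)
```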
### _How was this patch tested?_
These issues were found during my POC.
Closes #4678 from ulysses-you/skip-cache.
Closes #4678
b12620954 [ulysses-you] Improve kill executors
Authored-by: ulysses-you
Signed-off-by: ulyssesyou
---
docs/extensions/engines/spark/rules.md | 1 +
.../spark/sql/FinalStageResourceManager.scala | 28 +++++++++++++++++--
.../org/apache/kyuubi/sql/KyuubiSQLConf.scala | 7 +++++
3 files changed, 34 insertions(+), 2 deletions(-)
diff --git a/docs/extensions/engines/spark/rules.md b/docs/extensions/engines/spark/rules.md
index a4bda5d53ff..46e8dd3d114 100644
--- a/docs/extensions/engines/spark/rules.md
+++ b/docs/extensions/engines/spark/rules.md
@@ -84,6 +84,7 @@ Kyuubi provides some configs to make these feature easy to use.
| spark.sql.optimizer.insertRepartitionBeforeWriteIfNoShuffle.enabled | false | When true, add repartition even if the original plan does not have shuffle. | 1.7.0 |
| spark.sql.optimizer.finalStageConfigIsolationWriteOnly.enabled | true | When true, only enable final stage isolation for writing. | 1.7.0 |
| spark.sql.finalWriteStage.eagerlyKillExecutors.enabled | false | When true, eagerly kill redundant executors before running final write stage. | 1.8.0 |
+| spark.sql.finalWriteStage.skipKillingExecutorsForTableCache | true | When true, skip killing executors if the plan has table caches. | 1.8.0 |
| spark.sql.finalWriteStage.retainExecutorsFactor | 1.2 | If the target executors * factor < active executors, and target executors * factor > min executors, then inject kill executors or inject custom resource profile. | 1.8.0 |
| spark.sql.finalWriteStage.resourceIsolation.enabled | false | When true, make final write stage resource isolation using custom RDD resource profile. | 1.2.0 |
| spark.sql.finalWriteStageExecutorCores | fallback spark.executor.cores | Specify the executor core request for final write stage. It would be passed to the RDD resource profile. | 1.8.0 |
diff --git a/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala
index ca3f762e169..7a0ae1592d9 100644
--- a/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala
+++ b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala
@@ -26,6 +26,7 @@ import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.{FilterExec, ProjectExec, SortExec, SparkPlan}
import org.apache.spark.sql.execution.adaptive._
+import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec
import org.apache.spark.sql.execution.exchange.{ENSURE_REQUIREMENTS, ShuffleExchangeExec}
import org.apache.kyuubi.sql.{KyuubiSQLConf, MarkNumOutputColumnsRule}
@@ -69,6 +70,13 @@ case class FinalStageResourceManager(session: SparkSession)
return plan
}
+ // It's not safe to kill executors if this plan contains table caches.
+ // If an executor is lost, the RDD would re-compute those partitions.
+ if (hasTableCache(plan) &&
+ conf.getConf(KyuubiSQLConf.FINAL_WRITE_STAGE_SKIP_KILLING_EXECUTORS_FOR_TABLE_CACHE)) {
+ return plan
+ }
+
// TODO: move this to query stage optimizer when updating Spark to 3.5.x
// Since we are in `prepareQueryStage`, the AQE shuffle read has not been applied.
// So we need to apply it by self.
@@ -188,9 +196,18 @@ case class FinalStageResourceManager(session: SparkSession)
// see `https://github.com/apache/spark/pull/20604`.
// It may cause the status in `ExecutorAllocationManager` inconsistent with
// `CoarseGrainedSchedulerBackend` for a while. But it should be synchronous finally.
+ //
+ // We should adjust the target number of executors; otherwise `YarnAllocator` might
+ // re-request the original target executors if DRA has not updated the target yet.
+ // Note that DRA would re-adjust executors if there are more tasks to be executed, so we are safe.
+ //
+ // * We kill executors
+ // * YarnAllocator re-request target executors
+ // * DRA can not release executors since they are new added
+ // ----------------------------------------------------------------> timeline
executorAllocationClient.killExecutors(
executorIds = executorsToKill,
- adjustTargetNumExecutors = false,
+ adjustTargetNumExecutors = true,
countFailures = false,
force = false)
}
@@ -201,7 +218,7 @@ case class FinalStageResourceManager(session: SparkSession)
OptimizeShuffleWithLocalRead)
}
-trait FinalRebalanceStageHelper {
+trait FinalRebalanceStageHelper extends AdaptiveSparkPlanHelper {
@tailrec
final protected def findFinalRebalanceStage(plan: SparkPlan): Option[ShuffleQueryStageExec] = {
plan match {
@@ -216,4 +233,11 @@ trait FinalRebalanceStageHelper {
case _ => None
}
}
+
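+ /** Returns true if `plan` contains an [[InMemoryTableScanExec]], i.e. scans a table cache. */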
+ final protected def hasTableCache(plan: SparkPlan): Boolean = {
+ find(plan) {
+ case _: InMemoryTableScanExec => true
+ case _ => false
+ }.isDefined
+ }
}
diff --git a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLConf.scala b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLConf.scala
index 4df924b519f..aeee45869e6 100644
--- a/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLConf.scala
+++ b/extensions/spark/kyuubi-extension-spark-common/src/main/scala/org/apache/kyuubi/sql/KyuubiSQLConf.scala
@@ -198,6 +198,13 @@ object KyuubiSQLConf {
.booleanConf
.createWithDefault(false)
+ val FINAL_WRITE_STAGE_SKIP_KILLING_EXECUTORS_FOR_TABLE_CACHE =
+ buildConf("spark.sql.finalWriteStage.skipKillingExecutorsForTableCache")
+ .doc("When true, skip killing executors if the plan has table caches.")
+ .version("1.8.0")
+ .booleanConf
+ .createWithDefault(true)
+
val FINAL_WRITE_STAGE_PARTITION_FACTOR =
buildConf("spark.sql.finalWriteStage.retainExecutorsFactor")
.doc("If the target executors * factor < active executors, and " +
From d7532c5fd5b8fdf4931d1365520191b14e422491 Mon Sep 17 00:00:00 2001
From: liangbowen
Date: Mon, 10 Apr 2023 13:12:34 +0800
Subject: [PATCH 020/404] [KYUUBI #4615] Bump Ranger from 2.3.0 to 2.4.0
### _Why are the changes needed?_
To close #4615
- bump Ranger version to 2.4.0, release notes: https://cwiki.apache.org/confluence/display/RANGER/Apache+Ranger+2.4.0+-+Release+Notes
- #4585 fixed duplication and conflicts in the policy file
- update docs
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4675 from bowenliang123/ranger-2.4.0.
Closes #4615
d403bc324 [liangbowen] bump ranger from 2.3.0 to 2.4.0
Authored-by: liangbowen
Signed-off-by: liangbowen
---
docs/security/authorization/spark/build.md | 1 +
extensions/spark/kyuubi-spark-authz/README.md | 5 +++--
extensions/spark/kyuubi-spark-authz/pom.xml | 2 +-
pom.xml | 2 +-
4 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/docs/security/authorization/spark/build.md b/docs/security/authorization/spark/build.md
index 8520d853e3c..ea45f5d6b4f 100644
--- a/docs/security/authorization/spark/build.md
+++ b/docs/security/authorization/spark/build.md
@@ -70,6 +70,7 @@ The available `ranger.version`s are shown in the following table.
| Ranger Version | Supported | Remark |
|:--------------:|:---------:|:-----------------------------------------------------------------------------------------:|
+| 2.4.x | √ | - |
| 2.3.x | √ | - |
| 2.2.x | √ | - |
| 2.1.x | √ | - |
diff --git a/extensions/spark/kyuubi-spark-authz/README.md b/extensions/spark/kyuubi-spark-authz/README.md
index 5aafaf31e10..eb3804a65b6 100644
--- a/extensions/spark/kyuubi-spark-authz/README.md
+++ b/extensions/spark/kyuubi-spark-authz/README.md
@@ -26,7 +26,7 @@
## Build
```shell
-build/mvn clean package -pl :kyuubi-spark-authz_2.12 -Dspark.version=3.2.1 -Dranger.version=2.3.0
+build/mvn clean package -pl :kyuubi-spark-authz_2.12 -Dspark.version=3.2.1 -Dranger.version=2.4.0
```
### Supported Apache Spark Versions
@@ -44,7 +44,8 @@ build/mvn clean package -pl :kyuubi-spark-authz_2.12 -Dspark.version=3.2.1 -Dran
`-Dranger.version=`
-- [x] 2.3.x (default)
+- [x] 2.4.x (default)
+- [x] 2.3.x
- [x] 2.2.x
- [x] 2.1.x
- [x] 2.0.x
diff --git a/extensions/spark/kyuubi-spark-authz/pom.xml b/extensions/spark/kyuubi-spark-authz/pom.xml
index 0ecb546591e..fc96a2809c6 100644
--- a/extensions/spark/kyuubi-spark-authz/pom.xml
+++ b/extensions/spark/kyuubi-spark-authz/pom.xml
@@ -337,7 +337,7 @@
ranger.version
- 2.3.0
+ 2.4.0
diff --git a/pom.xml b/pom.xml
index 1fba6edeaa0..f17ced3e2fb 100644
--- a/pom.xml
+++ b/pom.xml
@@ -180,7 +180,7 @@
0.16.03.21.70.10.7
- 2.3.0
+ 2.4.03.2.153.2.15.04.1.0
From f5ef4018eeb1b6495afbc48dcfe7b2f004e8abce Mon Sep 17 00:00:00 2001
From: zwangsheng <2213335496@qq.com>
Date: Mon, 10 Apr 2023 13:16:20 +0800
Subject: [PATCH 021/404] [KYUUBI #3654][UI] Add Engine Manager Page
### _Why are the changes needed?_
Close #3654
Add Engine Manager Page for UI
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [x] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
![popo_2023-04-07 14-19-01](https://user-images.githubusercontent.com/52876270/230553293-a935533c-792f-47e6-9c3d-e91bf469452e.jpg)
Closes #4674 from zwangsheng/KYUUBI_3654.
Closes #3654
b18c7d2d9 [zwangsheng] fix style
75b61350a [zwangsheng] fix style
a064203fc [zwangsheng] I18n
da61ea2fe [zwangsheng] [KYUUBI #3654][UI] Engine Manager Page
Authored-by: zwangsheng <2213335496@qq.com>
Signed-off-by: Cheng Pan
---
kyuubi-server/web-ui/src/api/engine/index.ts | 35 ++++
kyuubi-server/web-ui/src/api/engine/types.ts | 25 +++
.../web-ui/src/locales/en_US/index.ts | 18 +-
.../web-ui/src/locales/zh_CN/index.ts | 18 +-
.../web-ui/src/router/engine/index.ts | 26 +++
kyuubi-server/web-ui/src/router/index.ts | 2 +
kyuubi-server/web-ui/src/utils/engine.ts | 26 +++
.../web-ui/src/views/engine/index.vue | 166 ++++++++++++++++++
.../views/layout/components/aside/types.ts | 10 ++
.../operation/operation-statistics/index.vue | 10 +-
.../session/session-statistics/index.vue | 6 +-
11 files changed, 326 insertions(+), 16 deletions(-)
create mode 100644 kyuubi-server/web-ui/src/api/engine/index.ts
create mode 100644 kyuubi-server/web-ui/src/api/engine/types.ts
create mode 100644 kyuubi-server/web-ui/src/router/engine/index.ts
create mode 100644 kyuubi-server/web-ui/src/utils/engine.ts
create mode 100644 kyuubi-server/web-ui/src/views/engine/index.vue
diff --git a/kyuubi-server/web-ui/src/api/engine/index.ts b/kyuubi-server/web-ui/src/api/engine/index.ts
new file mode 100644
index 00000000000..ff6dc038da5
--- /dev/null
+++ b/kyuubi-server/web-ui/src/api/engine/index.ts
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import request from '@/utils/request'
+import { IEngineSearch } from './types'
+
+export function getAllEngines(params: IEngineSearch) {
+ return request({
+ url: 'api/v1/admin/engine',
+ method: 'get',
+ params
+ })
+}
+
+export function deleteEngine(params: IEngineSearch) {
+ return request({
+ url: 'api/v1/admin/engine',
+ method: 'delete',
+ params
+ })
+}
diff --git a/kyuubi-server/web-ui/src/api/engine/types.ts b/kyuubi-server/web-ui/src/api/engine/types.ts
new file mode 100644
index 00000000000..86a05dd292c
--- /dev/null
+++ b/kyuubi-server/web-ui/src/api/engine/types.ts
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+interface IEngineSearch {
+ type: null | string
+ sharelevel: null | string
+ 'hive.server2.proxy.user': null | string
+ subdomain?: null | string
+}
+
+export { IEngineSearch }
diff --git a/kyuubi-server/web-ui/src/locales/en_US/index.ts b/kyuubi-server/web-ui/src/locales/en_US/index.ts
index d50f229150d..dc198693938 100644
--- a/kyuubi-server/web-ui/src/locales/en_US/index.ts
+++ b/kyuubi-server/web-ui/src/locales/en_US/index.ts
@@ -23,15 +23,25 @@ export default {
session_id: 'Session ID',
operation_id: 'Operation ID',
create_time: 'Create Time',
- operation: 'Operation',
- delete_confirm: 'Delete Confirm',
- close_confirm: 'Close Confirm',
- cancel_confirm: 'Cancel Confirm',
start_time: 'State Time',
complete_time: 'Completed Time',
state: 'State',
duration: 'Duration',
statement: 'Statement',
+ engine_address: 'Engine Address',
+ engine_id: 'Engine ID',
+ engine_type: 'Engine Type',
+ share_level: 'Share Level',
+ version: 'Version',
+ operation: {
+ text: 'Operation',
+ delete_confirm: 'Delete Confirm',
+ close_confirm: 'Close Confirm',
+ cancel_confirm: 'Cancel Confirm',
+ close: 'Close',
+ cancel: 'Cancel',
+ delete: 'Delete'
+ },
message: {
delete_succeeded: 'Delete {name} Succeeded',
delete_failed: 'Delete {name} Failed',
diff --git a/kyuubi-server/web-ui/src/locales/zh_CN/index.ts b/kyuubi-server/web-ui/src/locales/zh_CN/index.ts
index 443d129ccea..87b15cc4dfc 100644
--- a/kyuubi-server/web-ui/src/locales/zh_CN/index.ts
+++ b/kyuubi-server/web-ui/src/locales/zh_CN/index.ts
@@ -23,15 +23,25 @@ export default {
session_id: 'Session ID',
operation_id: 'Operation ID',
create_time: '创建时间',
- operation: '操作',
- delete_confirm: '确认删除',
- close_confirm: '确认关闭',
- cancel_confirm: '确认取消',
start_time: '开始时间',
complete_time: '完成时间',
state: '状态',
duration: '运行时间',
statement: 'Statement',
+ engine_address: 'Engine 地址',
+ engine_id: 'Engine ID',
+ engine_type: 'Engine 类型',
+ share_level: '共享级别',
+ version: '版本',
+ operation: {
+ text: '操作',
+ delete_confirm: '确认删除',
+ close_confirm: '确认关闭',
+ cancel_confirm: '确认取消',
+ close: '关闭',
+ cancel: '取消',
+ delete: '删除'
+ },
message: {
delete_succeeded: '删除 {name} 成功',
delete_failed: '删除 {name} 失败',
diff --git a/kyuubi-server/web-ui/src/router/engine/index.ts b/kyuubi-server/web-ui/src/router/engine/index.ts
new file mode 100644
index 00000000000..22b056a32ed
--- /dev/null
+++ b/kyuubi-server/web-ui/src/router/engine/index.ts
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const routes = [
+ {
+ path: '/engine/engine-statistics',
+ name: 'engine-statistics',
+ component: () => import('@/views/engine/index.vue')
+ }
+]
+
+export default routes
diff --git a/kyuubi-server/web-ui/src/router/index.ts b/kyuubi-server/web-ui/src/router/index.ts
index 4d01da5529d..241cdf50644 100644
--- a/kyuubi-server/web-ui/src/router/index.ts
+++ b/kyuubi-server/web-ui/src/router/index.ts
@@ -21,6 +21,7 @@ import workloadRoutes from './workload'
import operationRoutes from './operation'
import contactRoutes from './contact'
import sessionRoutes from './session'
+import engineRoutes from './engine'
const routes = [
{
@@ -40,6 +41,7 @@ const routes = [
...sessionRoutes,
...workloadRoutes,
...operationRoutes,
+ ...engineRoutes,
...contactRoutes
]
}
diff --git a/kyuubi-server/web-ui/src/utils/engine.ts b/kyuubi-server/web-ui/src/utils/engine.ts
new file mode 100644
index 00000000000..da6646191cd
--- /dev/null
+++ b/kyuubi-server/web-ui/src/utils/engine.ts
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+function getEngineType() {
+ return ['SPARK_SQL', 'FLINK_SQL', 'TRINO', 'HIVE_SQL', 'JDBC']
+}
+
+function getShareLevel() {
+ return ['CONNECTION', 'USER', 'GROUP', 'SERVER']
+}
+
+export { getEngineType, getShareLevel }
diff --git a/kyuubi-server/web-ui/src/views/engine/index.vue b/kyuubi-server/web-ui/src/views/engine/index.vue
new file mode 100644
index 00000000000..cecbde70975
--- /dev/null
+++ b/kyuubi-server/web-ui/src/views/engine/index.vue
@@ -0,0 +1,166 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{
+ scope.row.attributes && scope.row.attributes['kyuubi.engine.id']
+ ? scope.row.attributes['kyuubi.engine.id']
+ : '-'
+ }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/kyuubi-server/web-ui/src/views/layout/components/aside/types.ts b/kyuubi-server/web-ui/src/views/layout/components/aside/types.ts
index 4772c1a4e4d..72b150fa8cd 100644
--- a/kyuubi-server/web-ui/src/views/layout/components/aside/types.ts
+++ b/kyuubi-server/web-ui/src/views/layout/components/aside/types.ts
@@ -31,6 +31,16 @@ export const MENUS = [
}
]
},
+ {
+ label: 'Engine Management',
+ icon: 'List',
+ children: [
+ {
+ label: 'Engine Statistics',
+ router: '/engine/engine-statistics'
+ }
+ ]
+ },
{
label: 'Workload',
icon: 'List',
diff --git a/kyuubi-server/web-ui/src/views/operation/operation-statistics/index.vue b/kyuubi-server/web-ui/src/views/operation/operation-statistics/index.vue
index ff6706c7207..992257eb8c4 100644
--- a/kyuubi-server/web-ui/src/views/operation/operation-statistics/index.vue
+++ b/kyuubi-server/web-ui/src/views/operation/operation-statistics/index.vue
@@ -54,18 +54,18 @@
: '-'
}}
-
+
@@ -75,13 +75,13 @@
diff --git a/kyuubi-server/web-ui/src/views/session/session-statistics/index.vue b/kyuubi-server/web-ui/src/views/session/session-statistics/index.vue
index 40a9b7568d0..327664dd166 100644
--- a/kyuubi-server/web-ui/src/views/session/session-statistics/index.vue
+++ b/kyuubi-server/web-ui/src/views/session/session-statistics/index.vue
@@ -46,16 +46,16 @@
}}
-
+
From 458d92540ea0f26ec53af2ffbba2d8937bd64aea Mon Sep 17 00:00:00 2001
From: Fu Chen
Date: Mon, 10 Apr 2023 14:19:20 +0800
Subject: [PATCH 022/404] [KYUUBI #4590] Bump delta from 2.2.0 to 2.3.0
### _Why are the changes needed?_
test against delta-2.3.0
https://github.com/delta-io/delta/releases/tag/v2.3.0
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4590 from cfmcgrady/delta-2.3.0.
Closes #4590
cbabf2191 [Fu Chen] delta 2.3.0
f05faf09a [Fu Chen] fix
62cd94728 [Fu Chen] test against delta-2.3.0rc1
Authored-by: Fu Chen
Signed-off-by: Cheng Pan
---
pom.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pom.xml b/pom.xml
index f17ced3e2fb..8202168142d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -132,7 +132,7 @@
3.12.02.12.00.7.3
- 2.2.0
+ 2.3.02.4.40.9.30.62.2
From f0615a9aab0a5b755c16ee0b966a5ea59b98bd10 Mon Sep 17 00:00:00 2001
From: ulysses-you
Date: Mon, 10 Apr 2023 16:47:28 +0800
Subject: [PATCH 023/404] [KYUUBI #4683] Update
`spark.sql.finalWriteStage.resourceIsolation.enabled` version
### _Why are the changes needed?_
Fix the wrong introduced-in version (1.2.0 -> 1.8.0) documented for `spark.sql.finalWriteStage.resourceIsolation.enabled`.
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4683 from ulysses-you/followup.
Closes #4683
8e5d46fda [ulysses-you] update version
Authored-by: ulysses-you
Signed-off-by: ulyssesyou
---
docs/extensions/engines/spark/rules.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/extensions/engines/spark/rules.md b/docs/extensions/engines/spark/rules.md
index 46e8dd3d114..8fa636c318d 100644
--- a/docs/extensions/engines/spark/rules.md
+++ b/docs/extensions/engines/spark/rules.md
@@ -86,7 +86,7 @@ Kyuubi provides some configs to make these feature easy to use.
| spark.sql.finalWriteStage.eagerlyKillExecutors.enabled | false | When true, eagerly kill redundant executors before running final write stage. | 1.8.0 |
| spark.sql.finalWriteStage.skipKillingExecutorsForTableCache | true | When true, skip killing executors if the plan has table caches. | 1.8.0 |
| spark.sql.finalWriteStage.retainExecutorsFactor | 1.2 | If the target executors * factor < active executors, and target executors * factor > min executors, then inject kill executors or inject custom resource profile. | 1.8.0 |
-| spark.sql.finalWriteStage.resourceIsolation.enabled | false | When true, make final write stage resource isolation using custom RDD resource profile. | 1.2.0 |
+| spark.sql.finalWriteStage.resourceIsolation.enabled | false | When true, make final write stage resource isolation using custom RDD resource profile. | 1.8.0 |
| spark.sql.finalWriteStageExecutorCores | fallback spark.executor.cores | Specify the executor core request for final write stage. It would be passed to the RDD resource profile. | 1.8.0 |
| spark.sql.finalWriteStageExecutorMemory | fallback spark.executor.memory | Specify the executor on heap memory request for final write stage. It would be passed to the RDD resource profile. | 1.8.0 |
| spark.sql.finalWriteStageExecutorMemoryOverhead | fallback spark.executor.memoryOverhead | Specify the executor memory overhead request for final write stage. It would be passed to the RDD resource profile. | 1.8.0 |
From fa60e4c70be38bd87fefe50558277fa90a632c1e Mon Sep 17 00:00:00 2001
From: fwang12
Date: Wed, 12 Apr 2023 15:25:08 +0800
Subject: [PATCH 024/404] [KYUUBI #4691] [REST] Configure
FAIL_ON_UNKNOWN_PROPERTIES to false for KyuubiScalaObjectMapper
### _Why are the changes needed?_
Do not fail on unknown properties on the server side; see the sketch below.
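A minimal sketch of the resulting mapper setup (assuming `jackson-databind` and `jackson-module-scala` are on the classpath):

```scala
import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper}
import com.fasterxml.jackson.module.scala.DefaultScalaModule

// Unknown JSON fields are now ignored instead of failing deserialization.
val mapper: ObjectMapper = new ObjectMapper()
  .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
  .registerModule(DefaultScalaModule)
```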
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4691 from turboFei/ignore.
Closes #4691
a03cb5be0 [fwang12] Ignore
c406878e7 [fwang12] Fast return batch info when post batches
Authored-by: fwang12
Signed-off-by: fwang12
---
.../apache/kyuubi/server/api/KyuubiScalaObjectMapper.scala | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/KyuubiScalaObjectMapper.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/KyuubiScalaObjectMapper.scala
index 776c35ba731..724da120999 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/KyuubiScalaObjectMapper.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/KyuubiScalaObjectMapper.scala
@@ -19,11 +19,13 @@ package org.apache.kyuubi.server.api
import javax.ws.rs.ext.ContextResolver
-import com.fasterxml.jackson.databind.ObjectMapper
+import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
class KyuubiScalaObjectMapper extends ContextResolver[ObjectMapper] {
- private val mapper = new ObjectMapper().registerModule(DefaultScalaModule)
+ private val mapper = new ObjectMapper()
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
+ .registerModule(DefaultScalaModule)
override def getContext(aClass: Class[_]): ObjectMapper = mapper
}
From 1029fd674d66a69d9ee4437713f22d20ca8efe01 Mon Sep 17 00:00:00 2001
From: Kent Yao
Date: Wed, 12 Apr 2023 18:05:03 +0800
Subject: [PATCH 025/404] Revert "[KYUUBI #4647] Bump Maven from 3.8.7 to 3.9.1
and Mvnd from 0.9.0 to 1.0-m6"
This reverts commit b818c6fd84e04c5a3aa13a789bceb805c29c63aa.
---
build/mvnd | 9 ++++-----
pom.xml | 4 ++--
2 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/build/mvnd b/build/mvnd
index f0c72332fa7..81a6f5c20a5 100755
--- a/build/mvnd
+++ b/build/mvnd
@@ -94,9 +94,8 @@ function get_os_arch() {
# Determine the Mvnd version from the root pom.xml file and
# install mvnd under the build/ folder if needed.
function install_mvnd() {
- local MVN_VERSION=$(grep "<maven.version>" "${_DIR}/../pom.xml" | head -n1 | awk -F '[<>]' '{print $3}')
local MVND_VERSION=$(grep "<mvnd.version>" "${_DIR}/../pom.xml" | head -n1 | awk -F '[<>]' '{print $3}')
- local MVND_MVN_SHORT_VERSION=$(echo "$MVN_VERSION" | awk -F . '{print $1$2}')
+ local MVN_VERSION=$(grep "<maven.version>" "${_DIR}/../pom.xml" | head -n1 | awk -F '[<>]' '{print $3}')
MVND_BIN="$(command -v mvnd)"
if [ "$MVND_BIN" ]; then
local MVND_DETECTED_VERSION="$(mvnd -v 2>&1 | grep '(mvnd)' | awk '{print $5}')"
@@ -112,10 +111,10 @@ function install_mvnd() {
install_app \
"${APACHE_MIRROR}/maven/mvnd/${MVND_VERSION}" \
- "maven-mvnd-${MVND_VERSION}-m${MVND_MVN_SHORT_VERSION}-${OS_TYPE}-${ARCH}.tar.gz" \
- "maven-mvnd-${MVND_VERSION}-m${MVND_MVN_SHORT_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd"
+ "maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}.tar.gz" \
+ "maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd"
- MVND_BIN="${_DIR}/maven-mvnd-${MVND_VERSION}-m${MVND_MVN_SHORT_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd"
+ MVND_BIN="${_DIR}/maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd"
else
if [ "$(version $MVN_DETECTED_VERSION)" -ne "$(version $MVN_VERSION)" ]; then
echo "Mvnd $MVND_DETECTED_VERSION embedded maven version $MVN_DETECTED_VERSION is not equivalent to $MVN_VERSION required in pom."
diff --git a/pom.xml b/pom.xml
index 8202168142d..bbbabc62ba0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -109,8 +109,8 @@
<java.version>1.8</java.version>
- <maven.version>3.9.1</maven.version>
- <mvnd.version>1.0-m6</mvnd.version>
+ <maven.version>3.8.7</maven.version>
+ <mvnd.version>0.9.0</mvnd.version>
<maven.compiler.source>${java.version}</maven.compiler.source>
<maven.compiler.target>${java.version}</maven.compiler.target>
<scala.version>2.12.17</scala.version>
From cbde82cfc47af8fe1fdc015e65303d072e89eaf2 Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Wed, 12 Apr 2023 20:07:54 +0800
Subject: [PATCH 026/404] Revert "[KYUUBI #4502] Reduce build concurrency
mvnd.minThreads in CI builds"
This reverts commit 38cf59d47be8f6d1b45562259c9e6342215888de.
---
build/mvnd | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/build/mvnd b/build/mvnd
index 81a6f5c20a5..9af3429f34b 100755
--- a/build/mvnd
+++ b/build/mvnd
@@ -25,7 +25,7 @@ _CALLING_DIR="$(pwd)"
_COMPILE_JVM_OPTS="-Xms2g -Xmx2g -XX:ReservedCodeCacheSize=1g -Xss128m"
if [ "$CI" ]; then
- export MAVEN_CLI_OPTS="-Dmvnd.minThreads=4 --no-transfer-progress --errors --fail-fast -Dstyle.color=always"
+ export MAVEN_CLI_OPTS="-Dmvnd.minThreads=8 --no-transfer-progress --errors --fail-fast -Dstyle.color=always"
fi
# Installs any application tarball given a URL, the expected tarball name,
From af82f4dfb885090eb0f16db4529ee9dce8d11fb5 Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Wed, 12 Apr 2023 20:08:49 +0800
Subject: [PATCH 027/404] Revert "[KYUUBI #4481] Setup and use cached maven in
CI jobs"
This reverts commit d2a0fe2b60b6b20406ce7c02a553807b2ffdfc6e.
---
.github/actions/setup-mvnd/action.yaml | 5 +++--
.github/workflows/dep.yml | 2 +-
.github/workflows/license.yml | 2 +-
.github/workflows/master.yml | 16 ----------------
.github/workflows/nightly.yml | 2 --
.github/workflows/style.yml | 2 +-
6 files changed, 6 insertions(+), 23 deletions(-)
diff --git a/.github/actions/setup-mvnd/action.yaml b/.github/actions/setup-mvnd/action.yaml
index dac05c02479..55c8139ff8b 100644
--- a/.github/actions/setup-mvnd/action.yaml
+++ b/.github/actions/setup-mvnd/action.yaml
@@ -16,7 +16,8 @@
#
name: 'setup-mvnd'
-description: 'Setup Maven and Mvnd'
+description: 'Setup the maven daemon'
+continue-on-error: true
runs:
using: composite
steps:
@@ -31,5 +32,5 @@ runs:
run: build/mvn -v
shell: bash
- name: Check Mvnd
- run: build/mvnd -v || true
+ run: build/mvnd -v
shell: bash
diff --git a/.github/workflows/dep.yml b/.github/workflows/dep.yml
index 09197951a12..72f5c915da7 100644
--- a/.github/workflows/dep.yml
+++ b/.github/workflows/dep.yml
@@ -45,7 +45,7 @@ jobs:
java-version: 8
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
+ - name: Setup Mvnd
uses: ./.github/actions/setup-mvnd
- name: Check kyuubi modules available
id: modules-check
diff --git a/.github/workflows/license.yml b/.github/workflows/license.yml
index e62605e7f09..a490def9161 100644
--- a/.github/workflows/license.yml
+++ b/.github/workflows/license.yml
@@ -42,7 +42,7 @@ jobs:
java-version: 8
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
+ - name: Setup Mvnd
uses: ./.github/actions/setup-mvnd
- run: >-
build/mvnd org.apache.rat:apache-rat-plugin:check
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index b8b3f7072ac..8d8eaa00926 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -77,8 +77,6 @@ jobs:
java-version: ${{ matrix.java }}
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
- uses: ./.github/actions/setup-mvnd
- name: Cache Engine Archives
uses: ./.github/actions/cache-engine-archives
- name: Setup Python
@@ -133,8 +131,6 @@ jobs:
java-version: ${{ matrix.java }}
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
- uses: ./.github/actions/setup-mvnd
- name: Cache Engine Archives
uses: ./.github/actions/cache-engine-archives
- name: Build and test Kyuubi AuthZ with supported Spark versions
@@ -181,8 +177,6 @@ jobs:
java-version: ${{ matrix.java }}
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
- uses: ./.github/actions/setup-mvnd
- name: Cache Engine Archives
uses: ./.github/actions/cache-engine-archives
- name: Build Flink with maven w/o linters
@@ -229,8 +223,6 @@ jobs:
java-version: ${{ matrix.java }}
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
- uses: ./.github/actions/setup-mvnd
- name: Cache Engine Archives
uses: ./.github/actions/cache-engine-archives
- name: Build and test Hive with maven w/o linters
@@ -268,8 +260,6 @@ jobs:
java-version: ${{ matrix.java }}
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
- uses: ./.github/actions/setup-mvnd
- name: Cache Engine Archives
uses: ./.github/actions/cache-engine-archives
- name: Build and test JDBC with maven w/o linters
@@ -307,8 +297,6 @@ jobs:
java-version: ${{ matrix.java }}
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
- uses: ./.github/actions/setup-mvnd
- name: Cache Engine Archives
uses: ./.github/actions/cache-engine-archives
- name: Build and test Trino with maven w/o linters
@@ -341,8 +329,6 @@ jobs:
java-version: 8
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
- uses: ./.github/actions/setup-mvnd
- name: Cache Engine Archives
uses: ./.github/actions/cache-engine-archives
- name: Run TPC-DS Tests
@@ -480,8 +466,6 @@ jobs:
java-version: ${{ matrix.java }}
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
- uses: ./.github/actions/setup-mvnd
- name: Cache Engine Archives
uses: ./.github/actions/cache-engine-archives
- name: zookeeper integration tests
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index b53a7d29294..149da6d82b3 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -43,8 +43,6 @@ jobs:
java-version: 8
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
- uses: ./.github/actions/setup-mvnd
- name: Build with Maven
run: ./build/mvn clean install ${{ matrix.profiles }} -Dmaven.javadoc.skip=true -V
- name: Upload test logs
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index 2824e597288..6ca1903e11c 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -47,7 +47,7 @@ jobs:
java-version: 8
cache: 'maven'
check-latest: false
- - name: Setup Maven and Mvnd
+ - name: Setup Mvnd
uses: ./.github/actions/setup-mvnd
- name: Setup Python 3
uses: actions/setup-python@v4
From 1c6965270a10dc792a528ec74a8214efd8d6afa3 Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Wed, 12 Apr 2023 20:09:25 +0800
Subject: [PATCH 028/404] Revert "[KYUUBI #4274] [FOLLOWUP] Increase maximum
degree of concurrency for mvnd in CI jobs"
This reverts commit 5fab9b710ae048f165d4467d0368eb5d41ed50c1.
---
.github/actions/setup-mvnd/action.yaml | 6 +-----
.github/workflows/dep.yml | 2 +-
.github/workflows/style.yml | 6 +++---
build/mvnd | 7 +------
4 files changed, 6 insertions(+), 15 deletions(-)
diff --git a/.github/actions/setup-mvnd/action.yaml b/.github/actions/setup-mvnd/action.yaml
index 55c8139ff8b..d7497e332bf 100644
--- a/.github/actions/setup-mvnd/action.yaml
+++ b/.github/actions/setup-mvnd/action.yaml
@@ -17,7 +17,6 @@
name: 'setup-mvnd'
description: 'Setup the maven daemon'
-continue-on-error: true
runs:
using: composite
steps:
@@ -27,10 +26,7 @@ runs:
path: |
build/maven-mvnd-*
build/apache-maven-*
- key: setup-mvnd-${{ runner.os }}
- - name: Check Maven
- run: build/mvn -v
- shell: bash
+ key: setup-mvnd-${{ runner.os }}-mvnd
- name: Check Mvnd
run: build/mvnd -v
shell: bash
diff --git a/.github/workflows/dep.yml b/.github/workflows/dep.yml
index 72f5c915da7..ebda6b47eec 100644
--- a/.github/workflows/dep.yml
+++ b/.github/workflows/dep.yml
@@ -50,7 +50,7 @@ jobs:
- name: Check kyuubi modules available
id: modules-check
run: >-
- build/mvnd dependency:resolve validate -q
+ build/mvnd dependency:resolve validate
-DincludeGroupIds="org.apache.kyuubi" -DincludeScope="compile"
-Pfast -Denforcer.skip=false
-pl kyuubi-ctl,kyuubi-server,kyuubi-assembly -am
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index 6ca1903e11c..562e9312055 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -56,7 +56,7 @@ jobs:
cache: 'pip'
- name: Check kyuubi modules available
id: modules-check
- run: build/mvnd dependency:resolve -DincludeGroupIds="org.apache.kyuubi" -DincludeScope="compile" -DexcludeTransitive=true -q ${{ matrix.profiles }}
+ run: build/mvnd dependency:resolve -DincludeGroupIds="org.apache.kyuubi" -DincludeScope="compile" -DexcludeTransitive=true ${{ matrix.profiles }}
continue-on-error: true
- name: Install
@@ -71,7 +71,7 @@ jobs:
- name: Scalastyle with maven
id: scalastyle-check
- run: build/mvnd scalastyle:check -q ${{ matrix.profiles }}
+ run: build/mvnd scalastyle:check ${{ matrix.profiles }}
- name: Print scalastyle error report
if: failure() && steps.scalastyle-check.outcome != 'success'
run: >-
@@ -85,7 +85,7 @@ jobs:
run: |
SPOTLESS_BLACK_VERSION=$(build/mvn help:evaluate -Dexpression=spotless.python.black.version -q -DforceStdout)
pip install black==$SPOTLESS_BLACK_VERSION
- build/mvnd spotless:check -q ${{ matrix.profiles }} -Pspotless-python
+ build/mvnd spotless:check ${{ matrix.profiles }} -Pspotless-python
- name: setup npm
uses: actions/setup-node@v3
with:
diff --git a/build/mvnd b/build/mvnd
index 9af3429f34b..493ee43adcd 100755
--- a/build/mvnd
+++ b/build/mvnd
@@ -25,7 +25,7 @@ _CALLING_DIR="$(pwd)"
_COMPILE_JVM_OPTS="-Xms2g -Xmx2g -XX:ReservedCodeCacheSize=1g -Xss128m"
if [ "$CI" ]; then
- export MAVEN_CLI_OPTS="-Dmvnd.minThreads=8 --no-transfer-progress --errors --fail-fast -Dstyle.color=always"
+ export MAVEN_CLI_OPTS="--no-transfer-progress --errors --fail-fast"
fi
# Installs any application tarball given a URL, the expected tarball name,
@@ -131,9 +131,4 @@ cd "${_CALLING_DIR}"
export MAVEN_OPTS=${MAVEN_OPTS:-"$_COMPILE_JVM_OPTS"}
echo "Using \`mvnd\` from path: $MVND_BIN" 1>&2
-
-if [ "$MAVEN_CLI_OPTS" != "" ]; then
- echo "MAVEN_CLI_OPTS=$MAVEN_CLI_OPTS"
-fi
-
${MVND_BIN} $MAVEN_CLI_OPTS "$@"
From 6431225b441193647c6134ab2587bd4f96d87638 Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Wed, 12 Apr 2023 20:13:38 +0800
Subject: [PATCH 029/404] Revert "[KYUUBI #4274] [INFRA] Introduce `mvnd` to
speed up CI jobs of Dependency, Licence and Style Check"
This reverts commit d862272645dab08878c8f99158977b622227e784.
---
.github/actions/setup-mvnd/action.yaml | 32 ------
.github/workflows/dep.yml | 7 +-
.github/workflows/license.yml | 4 +-
.github/workflows/style.yml | 16 ++-
.gitignore | 1 -
.rat-excludes | 1 -
build/mvnd | 134 -------------------------
pom.xml | 1 -
8 files changed, 10 insertions(+), 186 deletions(-)
delete mode 100644 .github/actions/setup-mvnd/action.yaml
delete mode 100755 build/mvnd
diff --git a/.github/actions/setup-mvnd/action.yaml b/.github/actions/setup-mvnd/action.yaml
deleted file mode 100644
index d7497e332bf..00000000000
--- a/.github/actions/setup-mvnd/action.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-name: 'setup-mvnd'
-description: 'Setup the maven daemon'
-runs:
- using: composite
- steps:
- - name: Cache Mvnd
- uses: actions/cache@v3
- with:
- path: |
- build/maven-mvnd-*
- build/apache-maven-*
- key: setup-mvnd-${{ runner.os }}-mvnd
- - name: Check Mvnd
- run: build/mvnd -v
- shell: bash
diff --git a/.github/workflows/dep.yml b/.github/workflows/dep.yml
index ebda6b47eec..47e0d7023ee 100644
--- a/.github/workflows/dep.yml
+++ b/.github/workflows/dep.yml
@@ -26,7 +26,6 @@ on:
# when pom or dependency workflow changes
- '**/pom.xml'
- '.github/workflows/dep.yml'
- - .github/actions/setup-mvnd/*.yaml
concurrency:
group: dep-${{ github.head_ref || github.run_id }}
@@ -45,12 +44,10 @@ jobs:
java-version: 8
cache: 'maven'
check-latest: false
- - name: Setup Mvnd
- uses: ./.github/actions/setup-mvnd
- name: Check kyuubi modules available
id: modules-check
run: >-
- build/mvnd dependency:resolve validate
+ build/mvn dependency:resolve validate
-DincludeGroupIds="org.apache.kyuubi" -DincludeScope="compile"
-Pfast -Denforcer.skip=false
-pl kyuubi-ctl,kyuubi-server,kyuubi-assembly -am
@@ -60,7 +57,7 @@ jobs:
MAVEN_OPTS: -Dorg.slf4j.simpleLogger.defaultLogLevel=error
if: steps.modules-check.conclusion == 'success' && steps.modules-check.outcome == 'failure'
run: >-
- build/mvnd clean install
+ build/mvn clean install
-Pflink-provided,spark-provided,hive-provided
-Dmaven.javadoc.skip=true
-Drat.skip=true
diff --git a/.github/workflows/license.yml b/.github/workflows/license.yml
index a490def9161..17591dacb4d 100644
--- a/.github/workflows/license.yml
+++ b/.github/workflows/license.yml
@@ -42,10 +42,8 @@ jobs:
java-version: 8
cache: 'maven'
check-latest: false
- - name: Setup Mvnd
- uses: ./.github/actions/setup-mvnd
- run: >-
- build/mvnd org.apache.rat:apache-rat-plugin:check
+ build/mvn org.apache.rat:apache-rat-plugin:check
-Ptpcds -Pspark-block-cleaner -Pkubernetes-it
-Pspark-3.1 -Pspark-3.2 -Pspark-3.3
- name: Upload rat report
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index 562e9312055..e45b826fccc 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -47,16 +47,14 @@ jobs:
java-version: 8
cache: 'maven'
check-latest: false
- - name: Setup Mvnd
- uses: ./.github/actions/setup-mvnd
- name: Setup Python 3
uses: actions/setup-python@v4
with:
python-version: '3.9'
cache: 'pip'
- - name: Check kyuubi modules available
+ - name: Check kyuubi modules avaliable
id: modules-check
- run: build/mvnd dependency:resolve -DincludeGroupIds="org.apache.kyuubi" -DincludeScope="compile" -DexcludeTransitive=true ${{ matrix.profiles }}
+ run: build/mvn dependency:resolve -DincludeGroupIds="org.apache.kyuubi" -DincludeScope="compile" -DexcludeTransitive=true ${{ matrix.profiles }}
continue-on-error: true
- name: Install
@@ -65,13 +63,13 @@ jobs:
if: steps.modules-check.conclusion == 'success' && steps.modules-check.outcome == 'failure'
run: |
MVN_OPT="-DskipTests -Dorg.slf4j.simpleLogger.defaultLogLevel=warn -Dmaven.javadoc.skip=true -Drat.skip=true -Dscalastyle.skip=true -Dspotless.check.skip"
- build/mvnd clean install ${MVN_OPT} -Pflink-provided,hive-provided,spark-provided,spark-block-cleaner,spark-3.2,tpcds
- build/mvnd clean install ${MVN_OPT} -pl extensions/spark/kyuubi-extension-spark-3-1 -Pspark-3.1
- build/mvnd clean install ${MVN_OPT} -pl extensions/spark/kyuubi-extension-spark-3-3,extensions/spark/kyuubi-spark-connector-kudu,extensions/spark/kyuubi-spark-connector-hive -Pspark-3.3
+ build/mvn clean install ${MVN_OPT} -Pflink-provided,hive-provided,spark-provided,spark-block-cleaner,spark-3.2,tpcds
+ build/mvn clean install ${MVN_OPT} -pl extensions/spark/kyuubi-extension-spark-3-1 -Pspark-3.1
+ build/mvn clean install ${MVN_OPT} -pl extensions/spark/kyuubi-extension-spark-3-3,extensions/spark/kyuubi-spark-connector-kudu,extensions/spark/kyuubi-spark-connector-hive -Pspark-3.3
- name: Scalastyle with maven
id: scalastyle-check
- run: build/mvnd scalastyle:check ${{ matrix.profiles }}
+ run: build/mvn scalastyle:check ${{ matrix.profiles }}
- name: Print scalastyle error report
if: failure() && steps.scalastyle-check.outcome != 'success'
run: >-
@@ -85,7 +83,7 @@ jobs:
run: |
SPOTLESS_BLACK_VERSION=$(build/mvn help:evaluate -Dexpression=spotless.python.black.version -q -DforceStdout)
pip install black==$SPOTLESS_BLACK_VERSION
- build/mvnd spotless:check ${{ matrix.profiles }} -Pspotless-python
+ build/mvn spotless:check ${{ matrix.profiles }} -Pspotless-python
- name: setup npm
uses: actions/setup-node@v3
with:
diff --git a/.gitignore b/.gitignore
index 190294d06f3..a43859338dc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,7 +40,6 @@
.scala_dependencies
.settings
build/apache-maven*
-build/maven-mvnd*
build/release/tmp
build/scala*
build/test
diff --git a/.rat-excludes b/.rat-excludes
index 7a841cf9c6c..645c673d08f 100644
--- a/.rat-excludes
+++ b/.rat-excludes
@@ -32,7 +32,6 @@
NOTICE*
docs/**
build/apache-maven-*/**
-build/maven-mvnd-*/**
build/scala-*/**
**/**/operation_logs/**/**
**/**/server_operation_logs/**/**
diff --git a/build/mvnd b/build/mvnd
deleted file mode 100755
index 493ee43adcd..00000000000
--- a/build/mvnd
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Determine the current working directory
-_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-# Preserve the calling directory
-_CALLING_DIR="$(pwd)"
-# Options used during compilation
-_COMPILE_JVM_OPTS="-Xms2g -Xmx2g -XX:ReservedCodeCacheSize=1g -Xss128m"
-
-if [ "$CI" ]; then
- export MAVEN_CLI_OPTS="--no-transfer-progress --errors --fail-fast"
-fi
-
-# Installs any application tarball given a URL, the expected tarball name,
-# and, optionally, a checkable binary path to determine if the binary has
-# already been installed
-## Arg1 - URL
-## Arg2 - Tarball Name
-## Arg3 - Checkable Binary
-install_app() {
- local remote_tarball="$1/$2"
- local local_tarball="${_DIR}/$2"
- local binary="${_DIR}/$3"
-
- # setup `curl` and `wget` silent options if we're running on Jenkins
- local curl_opts="-L"
- local wget_opts=""
- curl_opts="--progress-bar ${curl_opts}"
- wget_opts="--progress=bar:force ${wget_opts}"
-
- if [ -z "$3" ] || [ ! -f "$binary" ]; then
- # check if we already have the tarball
- # check if we have curl installed
- # download application
- rm -f "$local_tarball"
- [ ! -f "${local_tarball}" ] && [ "$(command -v curl)" ] && \
- echo "exec: curl ${curl_opts} ${remote_tarball}" 1>&2 && \
- curl ${curl_opts} "${remote_tarball}" > "${local_tarball}"
- # if the file still doesn't exist, lets try `wget` and cross our fingers
- [ ! -f "${local_tarball}" ] && [ "$(command -v wget)" ] && \
- echo "exec: wget ${wget_opts} ${remote_tarball}" 1>&2 && \
- wget ${wget_opts} -O "${local_tarball}" "${remote_tarball}"
- # if both were unsuccessful, exit
- [ ! -f "${local_tarball}" ] && \
- echo -n "ERROR: Cannot download $2 with cURL or wget; " && \
- echo "please install manually and try again." && \
- exit 2
- cd "${_DIR}" && tar -xzf "$2"
- rm -rf "$local_tarball"
- fi
-}
-
-function get_os_type() {
- local unameOsOut=$(uname -s)
- local osType
- case "${unameOsOut}" in
- Linux*) osType=linux ;;
- Darwin*) osType=darwin ;;
- CYGWIN*) osType=windows ;;
- MINGW*) osType=windows ;;
- *) osType="UNKNOWN:${unameOsOut}" ;;
- esac
- echo "$osType"
-}
-
-function get_os_arch() {
- local unameArchOut="$(uname -m)"
- local arch
- case "${unameArchOut}" in
- x86_64*) arch=amd64 ;;
- arm64*) arch=aarch64 ;;
- *) arch="UNKNOWN:${unameOsOut}" ;;
- esac
- echo "$arch"
-}
-
-# Determine the Mvnd version from the root pom.xml file and
-# install mvnd under the build/ folder if needed.
-function install_mvnd() {
- local MVND_VERSION=$(grep "<mvnd.version>" "${_DIR}/../pom.xml" | head -n1 | awk -F '[<>]' '{print $3}')
- local MVN_VERSION=$(grep "<maven.version>" "${_DIR}/../pom.xml" | head -n1 | awk -F '[<>]' '{print $3}')
- MVND_BIN="$(command -v mvnd)"
- if [ "$MVND_BIN" ]; then
- local MVND_DETECTED_VERSION="$(mvnd -v 2>&1 | grep '(mvnd)' | awk '{print $5}')"
- local MVN_DETECTED_VERSION="$(mvnd -v 2>&1 | grep 'Apache Maven' | awk 'NR==2 {print $3}')"
- fi
- # See simple version normalization: http://stackoverflow.com/questions/16989598/bash-comparing-version-numbers
- function version { echo "$@" | awk -F. '{ printf("%03d%03d%03d\n", $1,$2,$3); }'; }
-
- if [ $(version $MVND_DETECTED_VERSION) -ne $(version $MVND_VERSION) ]; then
- local APACHE_MIRROR=${APACHE_MIRROR:-'https://downloads.apache.org'}
- local OS_TYPE=$(get_os_type)
- local ARCH=$(get_os_arch)
-
- install_app \
- "${APACHE_MIRROR}/maven/mvnd/${MVND_VERSION}" \
- "maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}.tar.gz" \
- "maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd"
-
- MVND_BIN="${_DIR}/maven-mvnd-${MVND_VERSION}-${OS_TYPE}-${ARCH}/bin/mvnd"
- else
- if [ "$(version $MVN_DETECTED_VERSION)" -ne "$(version $MVN_VERSION)" ]; then
- echo "Mvnd $MVND_DETECTED_VERSION embedded maven version $MVN_DETECTED_VERSION is not equivalent to $MVN_VERSION required in pom."
- exit 1
- fi
- fi
-}
-
-install_mvnd
-
-cd "${_CALLING_DIR}"
-
-# Set any `mvn` options if not already present
-export MAVEN_OPTS=${MAVEN_OPTS:-"$_COMPILE_JVM_OPTS"}
-
-echo "Using \`mvnd\` from path: $MVND_BIN" 1>&2
-${MVND_BIN} $MAVEN_CLI_OPTS "$@"
diff --git a/pom.xml b/pom.xml
index bbbabc62ba0..aa4a83a5617 100644
--- a/pom.xml
+++ b/pom.xml
@@ -110,7 +110,6 @@
<java.version>1.8</java.version>
<maven.version>3.8.7</maven.version>
- <mvnd.version>0.9.0</mvnd.version>
${java.version}
${java.version}
2.12.17
From 72c2b601da0bd5dda21b48040c69511ede97fb00 Mon Sep 17 00:00:00 2001
From: pengqli
Date: Thu, 13 Apr 2023 09:48:06 +0800
Subject: [PATCH 030/404] [KYUUBI #4697] [K8S][HELM] Add template rendering
info to README
### _Why are the changes needed?_
Users may want to test the template rendering and inspect the output without actually installing anything. Debugging templates provide a quick way of viewing the generated content without YAML parse errors blocking the render.
There are two ways to render templates (example invocations follow):
- locally, with `helm template --debug ../kyuubi`
- on the server side, with `helm install --dry-run --debug --generate-name ../kyuubi`
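For example, assuming the chart checkout lives at `../kyuubi` (the template file name below is one of this chart's own templates, and `--show-only` is a stock Helm 3 flag):
```shell
# Render a single template to narrow down a rendering problem
helm template --debug --show-only templates/kyuubi-deployment.yaml ../kyuubi

# Render the whole chart locally and page through the output
helm template --debug ../kyuubi | less
```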
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4697 from dev-lpq/helm_rendering.
Closes #4697
d83693df1 [Cheng Pan] Update charts/kyuubi/README.md
709d0b0a3 [pengqli] helm add template rendering
bce8ad9c6 [pengqli] helm add template rendering
Lead-authored-by: pengqli
Co-authored-by: Cheng Pan
Signed-off-by: Cheng Pan
---
charts/kyuubi/README.md | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/charts/kyuubi/README.md b/charts/kyuubi/README.md
index ef54c322605..a01fafde137 100644
--- a/charts/kyuubi/README.md
+++ b/charts/kyuubi/README.md
@@ -32,6 +32,20 @@ cluster using the [Helm](https://helm.sh) package manager.
- Kubernetes cluster
- Helm 3.0+
+## Template rendering
+
+When you want to test the template rendering without actually installing anything, [debugging templates](https://helm.sh/docs/chart_template_guide/debugging/) provide a quick way of viewing the generated content without YAML parse errors blocking the output.
+
+There are two ways to render templates. Both return the rendered templates so you can inspect the output.
+
+- Rendering chart templates locally
+```shell
+helm template --debug ../kyuubi
+```
+- Rendering chart templates on the server side
+```shell
+helm install --dry-run --debug --generate-name ../kyuubi
+```
## Documentation
From a9d3e11926ec828f8439ce5017e1aa8b62183b3e Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Thu, 13 Apr 2023 13:06:29 +0800
Subject: [PATCH 031/404] [KYUUBI #4652] Upgrade Hadoop from 3.3.4 to 3.3.5
### _Why are the changes needed?_
Upgrade Hadoop from 3.3.4 to 3.3.5, close #4652
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4689 from huapan123456/master.
Closes #4652
810af9848 [Cheng Pan] fix
380786e9f [Cheng Pan] nit
7c5f18ef9 [Cheng Pan] nit
2d308943b [Cheng Pan] nit
76869d822 [Cheng Pan] handle HDFS-16591
36142c787 [huapan] fix(KYUUBI #4652): the position of JaasConfiguration has changed on hadoop-clint-api-3.3.5
1b5de6dfe [huapan] feat(KYUUBI #4652): upgrade hadoop-version from 3.3.4 to 3.3.5
f9d34c716 [huapan] feat(KYUUBI #4652): upgrade hadoop-client-api from 3.3.4 to 3.3.5
Lead-authored-by: Cheng Pan
Co-authored-by: huapan
Signed-off-by: Cheng Pan
---
dev/dependencyList | 4 +-
.../zookeeper/ZookeeperClientProvider.scala | 66 +++++++++----------
.../ZookeeperDiscoveryClientSuite.scala | 9 +--
pom.xml | 2 +-
4 files changed, 41 insertions(+), 40 deletions(-)
diff --git a/dev/dependencyList b/dev/dependencyList
index ab7697d3516..fc9913604ec 100644
--- a/dev/dependencyList
+++ b/dev/dependencyList
@@ -51,8 +51,8 @@ grpc-protobuf/1.48.0//grpc-protobuf-1.48.0.jar
grpc-stub/1.48.0//grpc-stub-1.48.0.jar
gson/2.9.0//gson-2.9.0.jar
guava/31.1-jre//guava-31.1-jre.jar
-hadoop-client-api/3.3.4//hadoop-client-api-3.3.4.jar
-hadoop-client-runtime/3.3.4//hadoop-client-runtime-3.3.4.jar
+hadoop-client-api/3.3.5//hadoop-client-api-3.3.5.jar
+hadoop-client-runtime/3.3.5//hadoop-client-runtime-3.3.5.jar
hive-common/3.1.3//hive-common-3.1.3.jar
hive-metastore/3.1.3//hive-metastore-3.1.3.jar
hive-serde/3.1.3//hive-serde-3.1.3.jar
diff --git a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperClientProvider.scala b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperClientProvider.scala
index 8dd32d6b62b..b7297d969ea 100644
--- a/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperClientProvider.scala
+++ b/kyuubi-ha/src/main/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperClientProvider.scala
@@ -18,6 +18,7 @@
package org.apache.kyuubi.ha.client.zookeeper
import java.io.{File, IOException}
+import java.nio.charset.StandardCharsets
import javax.security.auth.login.Configuration
import scala.util.Random
@@ -26,13 +27,13 @@ import com.google.common.annotations.VisibleForTesting
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry._
import org.apache.hadoop.security.UserGroupInformation
-import org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager.JaasConfiguration
import org.apache.kyuubi.Logging
import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.ha.HighAvailabilityConf._
import org.apache.kyuubi.ha.client.{AuthTypes, RetryPolicies}
import org.apache.kyuubi.ha.client.RetryPolicies._
+import org.apache.kyuubi.reflection.DynConstructors
import org.apache.kyuubi.util.KyuubiHadoopUtils
object ZookeeperClientProvider extends Logging {
@@ -65,10 +66,8 @@ object ZookeeperClientProvider extends Logging {
.aclProvider(new ZookeeperACLProvider(conf))
.retryPolicy(retryPolicy)
- conf.get(HA_ZK_AUTH_DIGEST) match {
- case Some(anthString) =>
- builder.authorization("digest", anthString.getBytes("UTF-8"))
- case _ =>
+ conf.get(HA_ZK_AUTH_DIGEST).foreach { authString =>
+ builder.authorization("digest", authString.getBytes(StandardCharsets.UTF_8))
}
builder.build()
@@ -103,46 +102,47 @@ object ZookeeperClientProvider extends Logging {
*/
@throws[Exception]
def setUpZooKeeperAuth(conf: KyuubiConf): Unit = {
- def setupZkAuth(): Unit = {
- val keyTabFile = getKeyTabFile(conf)
- val maybePrincipal = conf.get(HA_ZK_AUTH_PRINCIPAL)
- val kerberized = maybePrincipal.isDefined && keyTabFile.isDefined
- if (UserGroupInformation.isSecurityEnabled && kerberized) {
- if (!new File(keyTabFile.get).exists()) {
- throw new IOException(s"${HA_ZK_AUTH_KEYTAB.key}: $keyTabFile does not exists")
+ def setupZkAuth(): Unit = (conf.get(HA_ZK_AUTH_PRINCIPAL), getKeyTabFile(conf)) match {
+ case (Some(principal), Some(keytab)) if UserGroupInformation.isSecurityEnabled =>
+ if (!new File(keytab).exists()) {
+ throw new IOException(s"${HA_ZK_AUTH_KEYTAB.key}: $keytab does not exist")
}
System.setProperty("zookeeper.sasl.clientconfig", "KyuubiZooKeeperClient")
- var principal = maybePrincipal.get
- principal = KyuubiHadoopUtils.getServerPrincipal(principal)
- val jaasConf = new JaasConfiguration("KyuubiZooKeeperClient", principal, keyTabFile.get)
+ val serverPrincipal = KyuubiHadoopUtils.getServerPrincipal(principal)
+ // HDFS-16591 makes breaking change on JaasConfiguration
+ val jaasConf = DynConstructors.builder()
+ .impl( // Hadoop 3.3.5 and above
+ "org.apache.hadoop.security.authentication.util.JaasConfiguration",
+ classOf[String],
+ classOf[String],
+ classOf[String])
+ .impl( // Hadoop 3.3.4 and previous
+ // scalastyle:off
+ "org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager.JaasConfiguration",
+ // scalastyle:on
+ classOf[String],
+ classOf[String],
+ classOf[String])
+ .build[Configuration]()
+ .newInstance("KyuubiZooKeeperClient", serverPrincipal, keytab)
Configuration.setConfiguration(jaasConf)
- }
+ case _ =>
}
- if (conf.get(HA_ENGINE_REF_ID).isEmpty
- && AuthTypes.withName(conf.get(HA_ZK_AUTH_TYPE)) == AuthTypes.KERBEROS) {
+ if (conf.get(HA_ENGINE_REF_ID).isEmpty &&
+ AuthTypes.withName(conf.get(HA_ZK_AUTH_TYPE)) == AuthTypes.KERBEROS) {
setupZkAuth()
- } else if (conf.get(HA_ENGINE_REF_ID).nonEmpty && AuthTypes
- .withName(conf.get(HA_ZK_ENGINE_AUTH_TYPE)) == AuthTypes.KERBEROS) {
+ } else if (conf.get(HA_ENGINE_REF_ID).nonEmpty &&
+ AuthTypes.withName(conf.get(HA_ZK_ENGINE_AUTH_TYPE)) == AuthTypes.KERBEROS) {
setupZkAuth()
}
-
}
@VisibleForTesting
def getKeyTabFile(conf: KyuubiConf): Option[String] = {
- val zkAuthKeytab = conf.get(HA_ZK_AUTH_KEYTAB)
- if (zkAuthKeytab.isDefined) {
- val zkAuthKeytabPath = zkAuthKeytab.get
- val relativeFileName = new File(zkAuthKeytabPath).getName
- if (new File(relativeFileName).exists()) {
- Some(relativeFileName)
- } else {
- Some(zkAuthKeytabPath)
- }
- } else {
- None
+ conf.get(HA_ZK_AUTH_KEYTAB).map { fullPath =>
+ val filename = new File(fullPath).getName
+ if (new File(filename).exists()) filename else fullPath
}
}
-
}
diff --git a/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClientSuite.scala b/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClientSuite.scala
index bbd8b94ac7c..e3f5546a9b0 100644
--- a/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClientSuite.scala
+++ b/kyuubi-ha/src/test/scala/org/apache/kyuubi/ha/client/zookeeper/ZookeeperDiscoveryClientSuite.scala
@@ -37,6 +37,7 @@ import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.ha.HighAvailabilityConf._
import org.apache.kyuubi.ha.client._
import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient
+import org.apache.kyuubi.ha.client.zookeeper.ZookeeperClientProvider._
import org.apache.kyuubi.service._
import org.apache.kyuubi.zookeeper.EmbeddedZookeeper
import org.apache.kyuubi.zookeeper.ZookeeperConf.ZK_CLIENT_PORT
@@ -117,7 +118,7 @@ abstract class ZookeeperDiscoveryClientSuite extends DiscoveryClientTests
conf.set(HA_ZK_AUTH_PRINCIPAL.key, principal)
conf.set(HA_ZK_AUTH_TYPE.key, AuthTypes.KERBEROS.toString)
- ZookeeperClientProvider.setUpZooKeeperAuth(conf)
+ setUpZooKeeperAuth(conf)
val configuration = Configuration.getConfiguration
val entries = configuration.getAppConfigurationEntry("KyuubiZooKeeperClient")
@@ -129,9 +130,9 @@ abstract class ZookeeperDiscoveryClientSuite extends DiscoveryClientTests
assert(options("useKeyTab").toString.toBoolean)
conf.set(HA_ZK_AUTH_KEYTAB.key, s"${keytab.getName}")
- val e = intercept[IOException](ZookeeperClientProvider.setUpZooKeeperAuth(conf))
- assert(e.getMessage ===
- s"${HA_ZK_AUTH_KEYTAB.key}: ${ZookeeperClientProvider.getKeyTabFile(conf)} does not exists")
+ val e = intercept[IOException](setUpZooKeeperAuth(conf))
+ assert(
+ e.getMessage === s"${HA_ZK_AUTH_KEYTAB.key}: ${getKeyTabFile(conf).get} does not exist")
}
}
diff --git a/pom.xml b/pom.xml
index aa4a83a5617..1c702091cd5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -143,7 +143,7 @@
1.48.0
31.1-jre
1.0.1
- <hadoop.version>3.3.4</hadoop.version>
+ <hadoop.version>3.3.5</hadoop.version>
4.0.3
10.14.2.0
1.0.2
From 52251ddae2c3c4c5940fb97d44d4d5ab85ad8538 Mon Sep 17 00:00:00 2001
From: liangbowen
Date: Thu, 13 Apr 2023 16:08:32 +0800
Subject: [PATCH 032/404] [KYUUBI #4677] [AUTHZ] Check generated policy file in
test suite
### _Why are the changes needed?_
- add a unit test to check the generated Ranger policy file introduced in #4585
- manually activate the `gen-policy` profile in CI builds, as the property-based activation was not auto-triggered as expected with the property `ranger.version=2.4.0` set in the project parent pom
- support regenerating the policy file within the same test suite (both modes are shown below) by running
`KYUUBI_UPDATE=1 build/mvn clean test -pl :kyuubi-spark-authz_2.12 -Dtest=none -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.gen.PolicyJsonFileGenerator -Pgen-policy`
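For reference, the suite runs in two modes; these invocations mirror the usage comments added in `PolicyJsonFileGenerator`:
```shell
# Check mode: fail if the checked-in policy file differs from the generated content
build/mvn clean test -Pgen-policy -pl :kyuubi-spark-authz_2.12 -Dtest=none \
  -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.gen.PolicyJsonFileGenerator

# Update mode: regenerate src/test/resources/sparkSql_hive_jenkins.json in place
KYUUBI_UPDATE=1 build/mvn clean test -Pgen-policy -pl :kyuubi-spark-authz_2.12 -Dtest=none \
  -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.gen.PolicyJsonFileGenerator
```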
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4677 from bowenliang123/authz-check-policy-gen.
Closes #4677
a372bdfd4 [liangbowen] remove unnecessary profile used in style workflow
7562c88f2 [liangbowen] include in spotless
37b674223 [liangbowen] update policy id
724ec5e28 [liangbowen] replace counter by using zipWithIndex
d322980e7 [liangbowen] extract KRangerPolicyResource object to simplify resource assembly
42c37606a [liangbowen] nit
18a8f4c51 [liangbowen] add usage comments
4ee254d6d [liangbowen] fix issue name in docs
d3cb08d21 [liangbowen] improve file reading
37e4c9c9f [Bowen Liang] Merge branch 'master' into authz-check-policy-gen
6366c50e4 [liangbowen] rename profile to `gen-policy` and remove activation rule by property setting
892faf5ef [liangbowen] update clue
266baa71a [liangbowen] update
cb94e8014 [liangbowen] update
de1f36531 [liangbowen] cleanup
e88c75d46 [liangbowen] check policy file gen
Lead-authored-by: liangbowen
Co-authored-by: Bowen Liang
Signed-off-by: liangbowen
---
.github/workflows/master.yml | 2 +-
docs/security/authorization/spark/build.md | 2 +-
extensions/spark/kyuubi-spark-authz/pom.xml | 9 +-
.../authz/gen/PolicyJsonFileGenerator.scala | 152 +++++++++---------
.../spark/authz/gen/RangerGenWrapper.scala | 27 ++++
.../test/resources/sparkSql_hive_jenkins.json | 64 ++++----
pom.xml | 1 +
7 files changed, 142 insertions(+), 115 deletions(-)
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index 8d8eaa00926..ae5b8188d82 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -32,7 +32,7 @@ concurrency:
cancel-in-progress: true
env:
- MVN_OPT: -Dmaven.javadoc.skip=true -Drat.skip=true -Dscalastyle.skip=true -Dspotless.check.skip -Dorg.slf4j.simpleLogger.defaultLogLevel=warn -Pjdbc-shaded -Dmaven.plugin.download.cache.path=/tmp/engine-archives
+ MVN_OPT: -Dmaven.javadoc.skip=true -Drat.skip=true -Dscalastyle.skip=true -Dspotless.check.skip -Dorg.slf4j.simpleLogger.defaultLogLevel=warn -Pjdbc-shaded,gen-policy -Dmaven.plugin.download.cache.path=/tmp/engine-archives
KUBERNETES_VERSION: v1.26.1
MINIKUBE_VERSION: v1.29.0
diff --git a/docs/security/authorization/spark/build.md b/docs/security/authorization/spark/build.md
index ea45f5d6b4f..7e38f2eed19 100644
--- a/docs/security/authorization/spark/build.md
+++ b/docs/security/authorization/spark/build.md
@@ -79,7 +79,7 @@ The available `ranger.version`s are shown in the following table.
| 1.1.x | √ | - |
| 1.0.x | √ | - |
| 0.7.x | √ | - |
-| 0.6.x | X | [RANGER-4672](https://github.com/apache/kyuubi/issues/4672) reported unresolved failures. |
+| 0.6.x | X | [KYUUBI-4672](https://github.com/apache/kyuubi/issues/4672) reported unresolved failures. |
Currently, all ranger releases are supported.
diff --git a/extensions/spark/kyuubi-spark-authz/pom.xml b/extensions/spark/kyuubi-spark-authz/pom.xml
index fc96a2809c6..27417109d93 100644
--- a/extensions/spark/kyuubi-spark-authz/pom.xml
+++ b/extensions/spark/kyuubi-spark-authz/pom.xml
@@ -332,14 +332,7 @@
- genpolicy
-
-
-
- ranger.version
- 2.4.0
-
-
+ gen-policy
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/PolicyJsonFileGenerator.scala b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/PolicyJsonFileGenerator.scala
index ce0e5fd7013..8dbc802b81b 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/PolicyJsonFileGenerator.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/PolicyJsonFileGenerator.scala
@@ -17,49 +17,71 @@
package org.apache.kyuubi.plugin.spark.authz.gen
-import java.nio.file.Paths
+import java.nio.charset.StandardCharsets
+import java.nio.file.{Files, Paths, StandardOpenOption}
import java.util.UUID
-import java.util.concurrent.atomic.AtomicLong
-
-import scala.language.implicitConversions
import com.fasterxml.jackson.annotation.JsonInclude.Include
import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper}
import com.fasterxml.jackson.databind.json.JsonMapper
import com.fasterxml.jackson.databind.node.ObjectNode
import com.fasterxml.jackson.module.scala.DefaultScalaModule
+import org.apache.commons.io.FileUtils
import org.apache.ranger.plugin.model.RangerPolicy
+// scalastyle:off
+import org.scalatest.funsuite.AnyFunSuite
-import org.apache.kyuubi.plugin.spark.authz.gen.KRangerPolicyItemAccess.allowTypes
-import org.apache.kyuubi.plugin.spark.authz.gen.PolicyJsonFileGenerator.RangerAccessType.{all, alter, create, drop, index, lock, read, select, update, use, write, RangerAccessType}
-import org.apache.kyuubi.plugin.spark.authz.gen.RangerClassConversions.getRangerObject
+import org.apache.kyuubi.plugin.spark.authz.gen.KRangerPolicyItemAccess._
+import org.apache.kyuubi.plugin.spark.authz.gen.KRangerPolicyResource._
+import org.apache.kyuubi.plugin.spark.authz.gen.RangerAccessType._
+import org.apache.kyuubi.plugin.spark.authz.gen.RangerClassConversions._
/**
* Generates the policy file to test/main/resources dir.
*
- * Usage:
- * build/mvn scala:run -pl :kyuubi-spark-authz_2.12
- * -DmainClass=org.apache.kyuubi.plugin.spark.authz.gen.PolicyJsonFileGenerator
+ * To run the test suite:
+ * build/mvn clean test -Pgen-policy -pl :kyuubi-spark-authz_2.12 -Dtest=none
+ * -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.gen.PolicyJsonFileGenerator
+ *
+ * To regenerate the ranger policy file:
+ * KYUUBI_UPDATE=1 build/mvn clean test -Pgen-policy -pl :kyuubi-spark-authz_2.12 -Dtest=none
+ * -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.gen.PolicyJsonFileGenerator
*/
-private object PolicyJsonFileGenerator {
- def main(args: Array[String]): Unit = {
- writeRangerServicePolicesJson()
- }
-
+class PolicyJsonFileGenerator extends AnyFunSuite {
+ // scalastyle:on
final private val mapper: ObjectMapper = JsonMapper.builder()
.addModule(DefaultScalaModule)
.serializationInclusion(Include.NON_NULL)
.build()
- def writeRangerServicePolicesJson(): Unit = {
+ test("check ranger policy file") {
val pluginHome = getClass.getProtectionDomain.getCodeSource.getLocation.getPath
.split("target").head
val policyFileName = "sparkSql_hive_jenkins.json"
- val policyFile = Paths.get(pluginHome, "src", "test", "resources", policyFileName).toFile
- // scalastyle:off println
- println(s"Writing ranger policies to $policyFileName.")
- // scalastyle:on println
- mapper.writerWithDefaultPrettyPrinter().writeValue(policyFile, servicePolicies)
+ val policyFilePath =
+ Paths.get(pluginHome, "src", "test", "resources", policyFileName)
+ val generatedStr = mapper.writerWithDefaultPrettyPrinter()
+ .writeValueAsString(servicePolicies)
+
+ if (sys.env.get("KYUUBI_UPDATE").contains("1")) {
+ // scalastyle:off println
+ println(s"Writing ranger policies to $policyFileName.")
+ // scalastyle:on println
+ Files.write(
+ policyFilePath,
+ generatedStr.getBytes(StandardCharsets.UTF_8),
+ StandardOpenOption.CREATE,
+ StandardOpenOption.TRUNCATE_EXISTING)
+ } else {
+ val existedFileContent =
+ FileUtils.readFileToString(policyFilePath.toFile, StandardCharsets.UTF_8)
+ withClue("Please regenerate the ranger policy file by running"
+ + "`KYUUBI_UPDATE=1 build/mvn clean test -Pgen-policy"
+ + " -pl :kyuubi-spark-authz_2.12 -Dtest=none"
+ + " -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.gen.PolicyJsonFileGenerator`.") {
+ assert(generatedStr.equals(existedFileContent))
+ }
+ }
}
private def servicePolicies: JsonNode = {
@@ -96,24 +118,15 @@ private object PolicyJsonFileGenerator {
policyMaskDateShowYearForValue4,
policyMaskShowFirst4ForValue5)
// fill the id and guid with auto-increased index
- .map(p => {
- val id = policyIdCounter.incrementAndGet()
- p.setId(id)
- p.setGuid(UUID.nameUUIDFromBytes(id.toString.getBytes()).toString)
- p
- })
+ .zipWithIndex
+ .map {
+ case (p, index) =>
+ p.setId(index)
+ p.setGuid(UUID.nameUUIDFromBytes(index.toString.getBytes()).toString)
+ p
+ }
}
- final private lazy val policyIdCounter = new AtomicLong(0)
-
- // resource template
- private def databaseRes(values: List[String]) =
- "database" -> KRangerPolicyResource(values = values).get
- private def tableRes(values: List[String]) =
- "table" -> KRangerPolicyResource(values = values).get
- private def columnRes(values: List[String]) =
- "column" -> KRangerPolicyResource(values = values).get
-
// users
private val admin = "admin"
private val bob = "bob"
@@ -130,18 +143,11 @@ private object PolicyJsonFileGenerator {
private val icebergNamespace = "iceberg_ns"
private val namespace1 = "ns1"
- // access type
- object RangerAccessType extends Enumeration {
- type RangerAccessType = Value
- val select, update, create, drop, alter, index, lock, all, read, write, use = Value
- }
- implicit def actionTypeStr(t: RangerAccessType): String = t.toString
-
// resources
- private val allDatabaseRes = databaseRes(List("*"))
- private val allTableRes = tableRes(List("*"))
- private val allColumnRes = columnRes(List("*"))
- private val srcTableRes = tableRes(List("src"))
+ private val allDatabaseRes = databaseRes("*")
+ private val allTableRes = tableRes("*")
+ private val allColumnRes = columnRes("*")
+ private val srcTableRes = tableRes("src")
// policy type
private val POLICY_TYPE_ACCESS: Int = 0
@@ -182,7 +188,7 @@ private object PolicyJsonFileGenerator {
name = "all - database, udf",
description = "Policy for all - database, udf",
resources = Map(
- databaseRes(List(defaultDb, sparkCatalog, icebergNamespace, namespace1)),
+ databaseRes(defaultDb, sparkCatalog, icebergNamespace, namespace1),
allTableRes,
allColumnRes),
policyItems = List(
@@ -198,9 +204,9 @@ private object PolicyJsonFileGenerator {
private val policyAccessForDefaultDbSrcTable = KRangerPolicy(
name = "default_kent",
resources = Map(
- databaseRes(List(defaultDb, sparkCatalog)),
+ databaseRes(defaultDb, sparkCatalog),
srcTableRes,
- columnRes(List("key"))),
+ columnRes("key")),
policyItems = List(
KRangerPolicyItem(
users = List(kent),
@@ -215,7 +221,7 @@ private object PolicyJsonFileGenerator {
name = "src_key_less_than_20",
policyType = POLICY_TYPE_ROWFILTER,
resources = Map(
- databaseRes(List(defaultDb)),
+ databaseRes(defaultDb),
srcTableRes),
rowFilterPolicyItems = List(
KRangerRowFilterPolicyItem(
@@ -227,8 +233,8 @@ private object PolicyJsonFileGenerator {
name = "perm_view_key_less_than_20",
policyType = POLICY_TYPE_ROWFILTER,
resources = Map(
- databaseRes(List(defaultDb)),
- tableRes(List("perm_view"))),
+ databaseRes(defaultDb),
+ tableRes("perm_view")),
rowFilterPolicyItems = List(
KRangerRowFilterPolicyItem(
rowFilterInfo = KRangerPolicyItemRowFilterInfo(filterExpr = "key<20"),
@@ -238,8 +244,8 @@ private object PolicyJsonFileGenerator {
private val policyAccessForDefaultBobUse = KRangerPolicy(
name = "default_bob_use",
resources = Map(
- databaseRes(List("default_bob", sparkCatalog)),
- tableRes(List("table_use*")),
+ databaseRes("default_bob", sparkCatalog),
+ tableRes("table_use*"),
allColumnRes),
policyItems = List(
KRangerPolicyItem(
@@ -250,8 +256,8 @@ private object PolicyJsonFileGenerator {
private val policyAccessForDefaultBobSelect = KRangerPolicy(
name = "default_bob_select",
resources = Map(
- databaseRes(List("default_bob", sparkCatalog)),
- tableRes(List("table_select*")),
+ databaseRes("default_bob", sparkCatalog),
+ tableRes("table_select*"),
allColumnRes),
policyItems = List(
KRangerPolicyItem(
@@ -263,9 +269,9 @@ private object PolicyJsonFileGenerator {
name = "src_value_hash_perm_view",
policyType = POLICY_TYPE_DATAMASK,
resources = Map(
- databaseRes(List(defaultDb, sparkCatalog)),
+ databaseRes(defaultDb, sparkCatalog),
srcTableRes,
- columnRes(List("value1"))),
+ columnRes("value1")),
dataMaskPolicyItems = List(
KRangerDataMaskPolicyItem(
dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK_HASH"),
@@ -277,9 +283,9 @@ private object PolicyJsonFileGenerator {
name = "src_value_hash",
policyType = POLICY_TYPE_DATAMASK,
resources = Map(
- databaseRes(List(defaultDb, sparkCatalog)),
- tableRes(List("perm_view")),
- columnRes(List("value1"))),
+ databaseRes(defaultDb, sparkCatalog),
+ tableRes("perm_view"),
+ columnRes("value1")),
dataMaskPolicyItems = List(
KRangerDataMaskPolicyItem(
dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK_HASH"),
@@ -291,9 +297,9 @@ private object PolicyJsonFileGenerator {
name = "src_value2_nullify",
policyType = POLICY_TYPE_DATAMASK,
resources = Map(
- databaseRes(List(defaultDb, sparkCatalog, icebergNamespace, namespace1)),
+ databaseRes(defaultDb, sparkCatalog, icebergNamespace, namespace1),
srcTableRes,
- columnRes(List("value2"))),
+ columnRes("value2")),
dataMaskPolicyItems = List(
KRangerDataMaskPolicyItem(
dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK"),
@@ -305,9 +311,9 @@ private object PolicyJsonFileGenerator {
name = "src_value3_sf4",
policyType = POLICY_TYPE_DATAMASK,
resources = Map(
- databaseRes(List(defaultDb, sparkCatalog)),
+ databaseRes(defaultDb, sparkCatalog),
srcTableRes,
- columnRes(List("value3"))),
+ columnRes("value3")),
dataMaskPolicyItems = List(
KRangerDataMaskPolicyItem(
dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK_SHOW_FIRST_4"),
@@ -319,9 +325,9 @@ private object PolicyJsonFileGenerator {
name = "src_value4_sf4",
policyType = POLICY_TYPE_DATAMASK,
resources = Map(
- databaseRes(List(defaultDb, sparkCatalog)),
+ databaseRes(defaultDb, sparkCatalog),
srcTableRes,
- columnRes(List("value4"))),
+ columnRes("value4")),
dataMaskPolicyItems = List(
KRangerDataMaskPolicyItem(
dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK_DATE_SHOW_YEAR"),
@@ -333,9 +339,9 @@ private object PolicyJsonFileGenerator {
name = "src_value5_sf4",
policyType = POLICY_TYPE_DATAMASK,
resources = Map(
- databaseRes(List(defaultDb, sparkCatalog)),
+ databaseRes(defaultDb, sparkCatalog),
srcTableRes,
- columnRes(List("value5"))),
+ columnRes("value5")),
dataMaskPolicyItems = List(
KRangerDataMaskPolicyItem(
dataMaskInfo = KRangerPolicyItemDataMaskInfo(dataMaskType = "MASK_SHOW_LAST_4"),
@@ -346,8 +352,8 @@ private object PolicyJsonFileGenerator {
private val policyAccessForPermViewAccessOnly = KRangerPolicy(
name = "someone_access_perm_view",
resources = Map(
- databaseRes(List(defaultDb)),
- tableRes(List("perm_view")),
+ databaseRes(defaultDb),
+ tableRes("perm_view"),
allColumnRes),
policyItems = List(
KRangerPolicyItem(
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/RangerGenWrapper.scala b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/RangerGenWrapper.scala
index 56a68b82fd6..14405f81698 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/RangerGenWrapper.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/RangerGenWrapper.scala
@@ -22,6 +22,8 @@ import scala.language.implicitConversions
import org.apache.ranger.plugin.model.RangerPolicy
import org.apache.ranger.plugin.model.RangerPolicy._
+import org.apache.kyuubi.plugin.spark.authz.gen.RangerClassConversions.getRangerObject
+
trait RangerObjectGenerator[T] {
def get: T
}
@@ -83,6 +85,17 @@ case class KRangerPolicyResource(
}
}
+object KRangerPolicyResource {
+ def databaseRes(values: String*): (String, RangerPolicyResource) =
+ "database" -> KRangerPolicyResource(values.toList)
+
+ def tableRes(values: String*): (String, RangerPolicyResource) =
+ "table" -> KRangerPolicyResource(values.toList)
+
+ def columnRes(values: String*): (String, RangerPolicyResource) =
+ "column" -> KRangerPolicyResource(values.toList)
+}
+
case class KRangerPolicyItemCondition(
`type`: String,
values: List[String]) extends RangerObjectGenerator[RangerPolicyItemCondition] {
@@ -182,3 +195,17 @@ case class KRangerPolicyItemRowFilterInfo(
i
}
}
+
+object RangerAccessType {
+ val select = "select"
+ val update = "update"
+ val create = "create"
+ val drop = "drop"
+ val alter = "alter"
+ val index = "index"
+ val lock = "lock"
+ val all = "all"
+ val read = "read"
+ val write = "write"
+ val use = "use"
+}
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/resources/sparkSql_hive_jenkins.json b/extensions/spark/kyuubi-spark-authz/src/test/resources/sparkSql_hive_jenkins.json
index 0b2acff5a32..6c160d3216a 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/resources/sparkSql_hive_jenkins.json
+++ b/extensions/spark/kyuubi-spark-authz/src/test/resources/sparkSql_hive_jenkins.json
@@ -4,8 +4,8 @@
"policyVersion" : 85,
"policyUpdateTime" : "20190429-21:36:09.000-+0800",
"policies" : [ {
- "id" : 1,
- "guid" : "c4ca4238-a0b9-3382-8dcc-509a6f75849b",
+ "id" : 0,
+ "guid" : "cfcd2084-95d5-35ef-a6e7-dff9f98764da",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -70,8 +70,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 2,
- "guid" : "c81e728d-9d4c-3f63-af06-7f89cc14862c",
+ "id" : 1,
+ "guid" : "c4ca4238-a0b9-3382-8dcc-509a6f75849b",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -146,8 +146,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 3,
- "guid" : "eccbc87e-4b5c-32fe-a830-8fd9f2a7baf3",
+ "id" : 2,
+ "guid" : "c81e728d-9d4c-3f63-af06-7f89cc14862c",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -217,8 +217,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 4,
- "guid" : "a87ff679-a2f3-371d-9181-a67b7542122c",
+ "id" : 3,
+ "guid" : "eccbc87e-4b5c-32fe-a830-8fd9f2a7baf3",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -303,8 +303,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 5,
- "guid" : "e4da3b7f-bbce-3345-9777-2b0674a318d5",
+ "id" : 4,
+ "guid" : "a87ff679-a2f3-371d-9181-a67b7542122c",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -389,8 +389,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 6,
- "guid" : "1679091c-5a88-3faf-afb5-e6087eb1b2dc",
+ "id" : 5,
+ "guid" : "e4da3b7f-bbce-3345-9777-2b0674a318d5",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -438,8 +438,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 7,
- "guid" : "8f14e45f-ceea-367a-9a36-dedd4bea2543",
+ "id" : 6,
+ "guid" : "1679091c-5a88-3faf-afb5-e6087eb1b2dc",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -490,8 +490,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 8,
- "guid" : "c9f0f895-fb98-3b91-99f5-1fd0297e236d",
+ "id" : 7,
+ "guid" : "8f14e45f-ceea-367a-9a36-dedd4bea2543",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -539,8 +539,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 9,
- "guid" : "45c48cce-2e2d-3fbd-aa1a-fc51c7c6ad26",
+ "id" : 8,
+ "guid" : "c9f0f895-fb98-3b91-99f5-1fd0297e236d",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -586,8 +586,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 10,
- "guid" : "d3d94468-02a4-3259-b55d-38e6d163e820",
+ "id" : 9,
+ "guid" : "45c48cce-2e2d-3fbd-aa1a-fc51c7c6ad26",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -633,8 +633,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 11,
- "guid" : "6512bd43-d9ca-36e0-ac99-0b0a82652dca",
+ "id" : 10,
+ "guid" : "d3d94468-02a4-3259-b55d-38e6d163e820",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -685,8 +685,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 12,
- "guid" : "c20ad4d7-6fe9-3759-aa27-a0c99bff6710",
+ "id" : 11,
+ "guid" : "6512bd43-d9ca-36e0-ac99-0b0a82652dca",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -737,8 +737,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 13,
- "guid" : "c51ce410-c124-310e-8db5-e4b97fc2af39",
+ "id" : 12,
+ "guid" : "c20ad4d7-6fe9-3759-aa27-a0c99bff6710",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -789,8 +789,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 14,
- "guid" : "aab32389-22bc-325a-af60-6eb525ffdc56",
+ "id" : 13,
+ "guid" : "c51ce410-c124-310e-8db5-e4b97fc2af39",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -841,8 +841,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 15,
- "guid" : "9bf31c7f-f062-336a-96d3-c8bd1f8f2ff3",
+ "id" : 14,
+ "guid" : "aab32389-22bc-325a-af60-6eb525ffdc56",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
@@ -893,8 +893,8 @@
"policyLabels" : [ ],
"isDenyAllElse" : false
}, {
- "id" : 16,
- "guid" : "c74d97b0-1eae-357e-84aa-9d5bade97baf",
+ "id" : 15,
+ "guid" : "9bf31c7f-f062-336a-96d3-c8bd1f8f2ff3",
"isEnabled" : true,
"version" : 1,
"service" : "hive_jenkins",
diff --git a/pom.xml b/pom.xml
index 1c702091cd5..09b2e45defd 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2075,6 +2075,7 @@
src/main/scala/**/*.scala
src/test/scala/**/*.scala
+ src/test/gen/scala/**/*.scala
${spotless.scala.scalafmt.version}
From 1603342de3fa253145a7d41cc67ca4f30e5de72d Mon Sep 17 00:00:00 2001
From: Kent Yao
Date: Thu, 13 Apr 2023 23:26:15 +0800
Subject: [PATCH 033/404] [KYUUBI #4278] Use new Apache 'closer.lua' syntax to
obtain Maven
### _Why are the changes needed?_
Use the official download link and provide a way to fall back.
- Doc reference: https://infra.apache.org/release-download-pages.html#download-scripts
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
```shell
build/mvn compile
exec: curl --progress-bar -L https://www.apache.org/dyn/closer.lua/maven/maven-3/3.8.7/binaries/apache-maven-3.8.7-bin.tar.gz?action=download
```
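For context, a minimal standalone sketch of the fallback logic this patch adds to `build/mvn` (the Maven version and output file name here are only illustrative):
```shell
MVN_VERSION=3.8.7
MVN_TARBALL="apache-maven-${MVN_VERSION}-bin.tar.gz"
FILE_PATH="maven/maven-3/${MVN_VERSION}/binaries"
APACHE_MIRROR="https://www.apache.org/dyn/closer.lua"
MIRROR_URL_QUERY="?action=download"

# Probe the closer.lua redirector with a HEAD request; fall back to the
# archive (which also hosts older releases) when the mirror cannot serve the file.
if ! curl -L --output /dev/null --silent --head --fail \
    "${APACHE_MIRROR}/${FILE_PATH}/${MVN_TARBALL}${MIRROR_URL_QUERY}"; then
  echo "Falling back to archive.apache.org to download Maven"
  APACHE_MIRROR="https://archive.apache.org/dist"
  MIRROR_URL_QUERY=""
fi
curl -L --progress-bar "${APACHE_MIRROR}/${FILE_PATH}/${MVN_TARBALL}${MIRROR_URL_QUERY}" \
  -o "${MVN_TARBALL}"
```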
Closes #4704 from yaooqinn/4278.
Closes #4278
27a1fc284 [Kent Yao] [KYUUBI #4278] Use new Apache 'closer.lua' syntax to obtain Maven
Authored-by: Kent Yao
Signed-off-by: Cheng Pan
---
build/mvn | 23 ++++++++++++++++++-----
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/build/mvn b/build/mvn
index 67aa02b4f79..cd6c0c796d1 100755
--- a/build/mvn
+++ b/build/mvn
@@ -35,7 +35,7 @@ fi
## Arg2 - Tarball Name
## Arg3 - Checkable Binary
install_app() {
- local remote_tarball="$1/$2"
+ local remote_tarball="$1/$2$4"
local local_tarball="${_DIR}/$2"
local binary="${_DIR}/$3"
@@ -77,12 +77,25 @@ install_mvn() {
# See simple version normalization: http://stackoverflow.com/questions/16989598/bash-comparing-version-numbers
function version { echo "$@" | awk -F. '{ printf("%03d%03d%03d\n", $1,$2,$3); }'; }
if [ $(version $MVN_DETECTED_VERSION) -ne $(version $MVN_VERSION) ]; then
- local APACHE_MIRROR=${APACHE_MIRROR:-'https://archive.apache.org/dist/'}
+ local APACHE_MIRROR=${APACHE_MIRROR:-'https://www.apache.org/dyn/closer.lua'}
+ local MIRROR_URL_QUERY="?action=download"
+ local MVN_TARBALL="apache-maven-${MVN_VERSION}-bin.tar.gz"
+ local FILE_PATH="maven/maven-3/${MVN_VERSION}/binaries"
+
+ if [ $(command -v curl) ]; then
+ if ! curl -L --output /dev/null --silent --head --fail "${APACHE_MIRROR}/${FILE_PATH}/${MVN_TARBALL}${MIRROR_URL_QUERY}" ; then
+ # Fall back to archive.apache.org for older Maven
+ echo "Falling back to archive.apache.org to download Maven"
+ APACHE_MIRROR="https://archive.apache.org/dist"
+ MIRROR_URL_QUERY=""
+ fi
+ fi
install_app \
- "${APACHE_MIRROR}/maven/maven-3/${MVN_VERSION}/binaries" \
- "apache-maven-${MVN_VERSION}-bin.tar.gz" \
- "apache-maven-${MVN_VERSION}/bin/mvn"
+ "${APACHE_MIRROR}/${FILE_PATH}" \
+ "${MVN_TARBALL}" \
+ "apache-maven-${MVN_VERSION}/bin/mvn" \
+ "${MIRROR_URL_QUERY}"
MVN_BIN="${_DIR}/apache-maven-${MVN_VERSION}/bin/mvn"
fi
From b0afe7b5d63c47bcf6b484730aa3cf97acd74516 Mon Sep 17 00:00:00 2001
From: huapan
Date: Fri, 14 Apr 2023 11:58:19 +0800
Subject: [PATCH 034/404] [KYUUBI #4696] Upgrade scalafmt from 3.7.1 to 3.7.3
### _Why are the changes needed?_
Close #4696
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4705 from huapan123456/feat-upgrade-scalafmt.
Closes #4696
434f94ddb [huapan] update(#4696): add some comment
7f8d7bd50 [huapan] fix(#4696): fix all
41dba1e6f [huapan] feat(#4696): upgrade scalafmt from 3.7.1 to 3.7.3
Authored-by: huapan
Signed-off-by: Cheng Pan
---
.scalafmt.conf | 2 +-
pom.xml | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/.scalafmt.conf b/.scalafmt.conf
index e682a17f71f..1bcf16de79d 100644
--- a/.scalafmt.conf
+++ b/.scalafmt.conf
@@ -1,4 +1,4 @@
-version = 3.7.1
+version = 3.7.3
runner.dialect=scala212
project.git=true
diff --git a/pom.xml b/pom.xml
index 09b2e45defd..f772bd1b437 100644
--- a/pom.xml
+++ b/pom.xml
@@ -245,7 +245,8 @@
1.7
<spotless.python.black.version>22.3.0</spotless.python.black.version>
- <spotless.scala.scalafmt.version>3.7.1</spotless.scala.scalafmt.version>
+
+ <spotless.scala.scalafmt.version>3.7.3</spotless.scala.scalafmt.version>
apache.releases.https
Apache Release Distribution Repository
From c0e967f1c500bc46d7038fa4f453f735d39eab1c Mon Sep 17 00:00:00 2001
From: dnskr
Date: Fri, 14 Apr 2023 11:59:31 +0800
Subject: [PATCH 035/404] [KYUUBI #4707] [K8S][HELM] Fix README typo and minor
reformatting
### _Why are the changes needed?_
The changes are needed to fix the link to the Kyuubi website and to follow the common template formatting style.
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4707 from dnskr/fix_readme_typo_and_reformatting.
Closes #4707
cb5d24ace [dnskr] [K8S][HELM] Fix README typo and minor reformatting
Authored-by: dnskr
Signed-off-by: Cheng Pan
---
charts/kyuubi/README.md | 2 +-
charts/kyuubi/templates/_helpers.tpl | 18 +++++++++---------
charts/kyuubi/templates/kyuubi-configmap.yaml | 2 +-
charts/kyuubi/templates/kyuubi-deployment.yaml | 4 +++-
charts/kyuubi/templates/kyuubi-service.yaml | 3 +--
5 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/charts/kyuubi/README.md b/charts/kyuubi/README.md
index a01fafde137..a1e5c4b622a 100644
--- a/charts/kyuubi/README.md
+++ b/charts/kyuubi/README.md
@@ -19,7 +19,7 @@
# Helm Chart for Apache Kyuubi
-[Apache Kyuubi](https://airflow.apache.org/) is a distributed and multi-tenant gateway to provide serverless SQL on Data Warehouses and Lakehouses.
+[Apache Kyuubi](https://kyuubi.apache.org) is a distributed and multi-tenant gateway to provide serverless SQL on Data Warehouses and Lakehouses.
## Introduction
diff --git a/charts/kyuubi/templates/_helpers.tpl b/charts/kyuubi/templates/_helpers.tpl
index cd4865a1288..07e66d5f182 100644
--- a/charts/kyuubi/templates/_helpers.tpl
+++ b/charts/kyuubi/templates/_helpers.tpl
@@ -20,14 +20,14 @@ A comma separated string of enabled frontend protocols, e.g. "REST,THRIFT_BINARY
For details, see 'kyuubi.frontend.protocols': https://kyuubi.readthedocs.io/en/master/deployment/settings.html#frontend
*/}}
{{- define "kyuubi.frontend.protocols" -}}
-{{- $protocols := list }}
-{{- range $name, $frontend := .Values.server }}
- {{- if $frontend.enabled }}
- {{- $protocols = $name | snakecase | upper | append $protocols }}
+ {{- $protocols := list }}
+ {{- range $name, $frontend := .Values.server }}
+ {{- if $frontend.enabled }}
+ {{- $protocols = $name | snakecase | upper | append $protocols }}
+ {{- end }}
{{- end }}
-{{- end }}
-{{- if not $protocols }}
- {{ fail "At least one frontend protocol must be enabled!" }}
-{{- end }}
-{{- $protocols | join "," }}
+ {{- if not $protocols }}
+ {{ fail "At least one frontend protocol must be enabled!" }}
+ {{- end }}
+ {{- $protocols | join "," }}
{{- end }}
diff --git a/charts/kyuubi/templates/kyuubi-configmap.yaml b/charts/kyuubi/templates/kyuubi-configmap.yaml
index 4964e651cdb..596ec493e23 100644
--- a/charts/kyuubi/templates/kyuubi-configmap.yaml
+++ b/charts/kyuubi/templates/kyuubi-configmap.yaml
@@ -43,7 +43,7 @@ data:
## User provided Kyuubi configurations
{{- with .Values.kyuubiConf.kyuubiDefaults }}
- {{- tpl . $ | nindent 4 }}
+ {{- tpl . $ | nindent 4 }}
{{- end }}
{{- with .Values.kyuubiConf.log4j2 }}
log4j2.xml: |
diff --git a/charts/kyuubi/templates/kyuubi-deployment.yaml b/charts/kyuubi/templates/kyuubi-deployment.yaml
index 43899b6fc51..52468df4e39 100644
--- a/charts/kyuubi/templates/kyuubi-deployment.yaml
+++ b/charts/kyuubi/templates/kyuubi-deployment.yaml
@@ -42,7 +42,9 @@ spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets: {{- toYaml . | nindent 8 }}
{{- end }}
+ {{- if or .Values.serviceAccount.name .Values.serviceAccount.create }}
serviceAccountName: {{ .Values.serviceAccount.name | default .Release.Name }}
+ {{- end }}
{{- with .Values.initContainers }}
initContainers: {{- tpl (toYaml .) $ | nindent 8 }}
{{- end }}
@@ -90,7 +92,7 @@ spec:
successThreshold: {{ .Values.probe.readiness.successThreshold }}
{{- end }}
{{- with .Values.resources }}
- resources: {{- toYaml . | nindent 12 }}
+ resources: {{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- name: conf
diff --git a/charts/kyuubi/templates/kyuubi-service.yaml b/charts/kyuubi/templates/kyuubi-service.yaml
index 963f1fcc709..3cea30b70d4 100644
--- a/charts/kyuubi/templates/kyuubi-service.yaml
+++ b/charts/kyuubi/templates/kyuubi-service.yaml
@@ -28,8 +28,7 @@ metadata:
app.kubernetes.io/version: {{ $.Values.image.tag | default $.Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- with $frontend.service.annotations }}
- annotations:
- {{- toYaml . | nindent 4 }}
+ annotations: {{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ $frontend.service.type }}
From 0f82625109957bfa8628725a37caf2efb91901ec Mon Sep 17 00:00:00 2001
From: dnskr
Date: Fri, 14 Apr 2023 12:01:36 +0800
Subject: [PATCH 036/404] [KYUUBI #4706] [K8S][HELM] Use template comments for
the chart license header
### _Why are the changes needed?_
The changes are needed to avoid rendering license headers when debugging the chart templates.
According to [Comments (YAML Comments vs. Template Comments)](https://helm.sh/docs/chart_best_practices/templates/#comments-yaml-comments-vs-template-comments):
> YAML comments may be used when it is useful for Helm users to (possibly) see the comments during debugging
Since there is no value in seeing the license during debugging, it's better to use template comments instead of YAML comments.
### Before
```shell
$ helm template test charts/kyuubi
---
# Source: kyuubi/templates/kyuubi-serviceaccount.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: ServiceAccount
metadata:
name: test
labels:
helm.sh/chart: kyuubi-0.1.0
app.kubernetes.io/name: kyuubi
app.kubernetes.io/instance: test
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/managed-by: Helm
---
```
### After
```shell
$ helm template test charts/kyuubi
---
# Source: kyuubi/templates/kyuubi-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: test
labels:
helm.sh/chart: kyuubi-0.1.0
app.kubernetes.io/name: kyuubi
app.kubernetes.io/instance: test
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/managed-by: Helm
---
```
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4706 from dnskr/chart_license_header.
Closes #4706
cabaa62b6 [dnskr] [K8S][HELM] Use template comments for the chart license header
Authored-by: dnskr
Signed-off-by: Cheng Pan
---
charts/kyuubi/templates/kyuubi-configmap.yaml | 32 +++++++++----------
.../kyuubi/templates/kyuubi-deployment.yaml | 32 +++++++++----------
charts/kyuubi/templates/kyuubi-role.yaml | 32 +++++++++----------
.../kyuubi/templates/kyuubi-rolebinding.yaml | 32 +++++++++----------
charts/kyuubi/templates/kyuubi-service.yaml | 32 +++++++++----------
.../templates/kyuubi-serviceaccount.yaml | 32 +++++++++----------
6 files changed, 96 insertions(+), 96 deletions(-)
diff --git a/charts/kyuubi/templates/kyuubi-configmap.yaml b/charts/kyuubi/templates/kyuubi-configmap.yaml
index 596ec493e23..22d6562b8d8 100644
--- a/charts/kyuubi/templates/kyuubi-configmap.yaml
+++ b/charts/kyuubi/templates/kyuubi-configmap.yaml
@@ -1,19 +1,19 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
+{{/*
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
apiVersion: v1
kind: ConfigMap
diff --git a/charts/kyuubi/templates/kyuubi-deployment.yaml b/charts/kyuubi/templates/kyuubi-deployment.yaml
index 52468df4e39..b30913dd09f 100644
--- a/charts/kyuubi/templates/kyuubi-deployment.yaml
+++ b/charts/kyuubi/templates/kyuubi-deployment.yaml
@@ -1,19 +1,19 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
+{{/*
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
apiVersion: apps/v1
kind: Deployment
diff --git a/charts/kyuubi/templates/kyuubi-role.yaml b/charts/kyuubi/templates/kyuubi-role.yaml
index fcb5a9f6e4f..7e0a810a1aa 100644
--- a/charts/kyuubi/templates/kyuubi-role.yaml
+++ b/charts/kyuubi/templates/kyuubi-role.yaml
@@ -1,19 +1,19 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
+{{/*
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
diff --git a/charts/kyuubi/templates/kyuubi-rolebinding.yaml b/charts/kyuubi/templates/kyuubi-rolebinding.yaml
index 8f74efc2dba..e7dd0d64b9f 100644
--- a/charts/kyuubi/templates/kyuubi-rolebinding.yaml
+++ b/charts/kyuubi/templates/kyuubi-rolebinding.yaml
@@ -1,19 +1,19 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
+{{/*
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
diff --git a/charts/kyuubi/templates/kyuubi-service.yaml b/charts/kyuubi/templates/kyuubi-service.yaml
index 3cea30b70d4..ddc2e230f4d 100644
--- a/charts/kyuubi/templates/kyuubi-service.yaml
+++ b/charts/kyuubi/templates/kyuubi-service.yaml
@@ -1,19 +1,19 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
+{{/*
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
{{- range $name, $frontend := .Values.server }}
{{- if $frontend.enabled }}
diff --git a/charts/kyuubi/templates/kyuubi-serviceaccount.yaml b/charts/kyuubi/templates/kyuubi-serviceaccount.yaml
index 770d5013669..bbfa22e35f8 100644
--- a/charts/kyuubi/templates/kyuubi-serviceaccount.yaml
+++ b/charts/kyuubi/templates/kyuubi-serviceaccount.yaml
@@ -1,19 +1,19 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
+{{/*
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
{{- if .Values.serviceAccount.create }}
apiVersion: v1
From 46bddc3864d322f56069672c7639f2624587f68c Mon Sep 17 00:00:00 2001
From: sychen
Date: Fri, 14 Apr 2023 16:47:43 +0800
Subject: [PATCH 037/404] [KYUUBI #4688] Fix the failure to read the operation
log after executing catalog and database operation
### _Why are the changes needed?_
Now `GetCurrentCatalog`/`GetCurrentDatabase`/`SetCurrentCatalog`/`SetCurrentDatabase` are executed as statements, and the JDBC client will try to fetch the operation log corresponding to each statement.
At present, these operations do not generate operation logs, so the engine throws an exception (`failed to generate operation log`).
```java
23/04/10 20:25:23 INFO GetCurrentCatalog: Processing anonymous's query[8218e7ed-b4a4-41ad-a1cc-6f82bf3d55bb]: INITIALIZED_STATE -> RUNNING_STATE, statement:
GetCurrentCatalog
23/04/10 20:25:23 INFO GetCurrentCatalog: Processing anonymous's query[8218e7ed-b4a4-41ad-a1cc-6f82bf3d55bb]: RUNNING_STATE -> FINISHED_STATE, time taken: 0.002 seconds
23/04/10 20:25:23 ERROR SparkTBinaryFrontendService: Error fetching results:
org.apache.kyuubi.KyuubiSQLException: OperationHandle [8218e7ed-b4a4-41ad-a1cc-6f82bf3d55bb] failed to generate operation log
at org.apache.kyuubi.KyuubiSQLException$.apply(KyuubiSQLException.scala:69)
at org.apache.kyuubi.operation.OperationManager.$anonfun$getOperationLogRowSet$2(OperationManager.scala:146)
at scala.Option.getOrElse(Option.scala:189)
```
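The fix attaches an `OperationLog` to each of these operations and guards `OperationLog.close()` so it is a no-op when the log was never written. A minimal Scala sketch of that guard pattern, with hypothetical names rather than Kyuubi's actual classes:
```scala
import java.nio.file.{Files, Path}

class LazilyInitializedLog(path: Path) {
  private var initialized = false
  // The underlying file is only created on the first write.
  private lazy val writer = {
    initialized = true
    Files.newBufferedWriter(path)
  }

  def write(msg: String): Unit = synchronized {
    writer.write(msg)
    writer.newLine()
  }

  def close(): Unit = synchronized {
    if (!initialized) return // never written: nothing to flush or delete
    writer.close()
  }
}
```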
### _How was this patch tested?_
- [x] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4688 from cxzl25/op_log_catalog.
Closes #4688
8ebc0f570 [sychen] Fix the failure to read the operation log after executing Catalog and database operation
Authored-by: sychen
Signed-off-by: Cheng Pan
---
.../flink/operation/GetCurrentCatalog.scala | 6 ++++++
.../flink/operation/GetCurrentDatabase.scala | 6 ++++++
.../flink/operation/SetCurrentCatalog.scala | 6 ++++++
.../flink/operation/SetCurrentDatabase.scala | 6 ++++++
.../spark/operation/GetCurrentCatalog.scala | 5 +++++
.../spark/operation/GetCurrentDatabase.scala | 5 +++++
.../spark/operation/SetCurrentCatalog.scala | 5 +++++
.../spark/operation/SetCurrentDatabase.scala | 5 +++++
.../trino/operation/GetCurrentCatalog.scala | 5 +++++
.../trino/operation/GetCurrentDatabase.scala | 5 +++++
.../trino/operation/SetCurrentCatalog.scala | 5 +++++
.../trino/operation/SetCurrentDatabase.scala | 5 +++++
.../kyuubi/operation/log/OperationLog.scala | 2 ++
.../operation/log/OperationLogSuite.scala | 21 +++++++++++++++++++
14 files changed, 87 insertions(+)
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala
index 988072e8da4..3e42e9aa6ec 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala
@@ -18,11 +18,17 @@
package org.apache.kyuubi.engine.flink.operation
import org.apache.kyuubi.engine.flink.result.ResultSetUtil
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_CAT
import org.apache.kyuubi.session.Session
class GetCurrentCatalog(session: Session) extends FlinkOperation(session) {
+ private val operationLog: OperationLog =
+ OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def runInternal(): Unit = {
try {
val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala
index 8315a18d3d8..014ca2ea379 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala
@@ -18,11 +18,17 @@
package org.apache.kyuubi.engine.flink.operation
import org.apache.kyuubi.engine.flink.result.ResultSetUtil
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_SCHEM
import org.apache.kyuubi.session.Session
class GetCurrentDatabase(session: Session) extends FlinkOperation(session) {
+ private val operationLog: OperationLog =
+ OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def runInternal(): Unit = {
try {
val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala
index 489cc638458..60214b2cd0f 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala
@@ -17,11 +17,17 @@
package org.apache.kyuubi.engine.flink.operation
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.session.Session
class SetCurrentCatalog(session: Session, catalog: String)
extends FlinkOperation(session) {
+ private val operationLog: OperationLog =
+ OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def runInternal(): Unit = {
try {
val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala
index 0d3598405d8..7610ab2f18c 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala
@@ -17,11 +17,17 @@
package org.apache.kyuubi.engine.flink.operation
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.session.Session
class SetCurrentDatabase(session: Session, database: String)
extends FlinkOperation(session) {
+ private val operationLog: OperationLog =
+ OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def runInternal(): Unit = {
try {
val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentCatalog.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentCatalog.scala
index 66d707ec033..96e0132848d 100644
--- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentCatalog.scala
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentCatalog.scala
@@ -21,11 +21,16 @@ import org.apache.spark.sql.types.StructType
import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim
import org.apache.kyuubi.operation.IterableFetchIterator
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_CAT
import org.apache.kyuubi.session.Session
class GetCurrentCatalog(session: Session) extends SparkOperation(session) {
+ private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def resultSchema: StructType = {
new StructType()
.add(TABLE_CAT, "string", nullable = true, "Catalog name.")
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentDatabase.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentDatabase.scala
index bcf3ad2a5f0..10b325d76d9 100644
--- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentDatabase.scala
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/GetCurrentDatabase.scala
@@ -21,11 +21,16 @@ import org.apache.spark.sql.types.StructType
import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim
import org.apache.kyuubi.operation.IterableFetchIterator
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_SCHEM
import org.apache.kyuubi.session.Session
class GetCurrentDatabase(session: Session) extends SparkOperation(session) {
+ private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def resultSchema: StructType = {
new StructType()
.add(TABLE_SCHEM, "string", nullable = true, "Schema name.")
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentCatalog.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentCatalog.scala
index 4e8c0aa69a4..7571c3e3295 100644
--- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentCatalog.scala
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentCatalog.scala
@@ -20,10 +20,15 @@ package org.apache.kyuubi.engine.spark.operation
import org.apache.spark.sql.types.StructType
import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.session.Session
class SetCurrentCatalog(session: Session, catalog: String) extends SparkOperation(session) {
+ private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def resultSchema: StructType = {
new StructType()
}
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentDatabase.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentDatabase.scala
index 0a21bc83965..2112f544ab2 100644
--- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentDatabase.scala
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SetCurrentDatabase.scala
@@ -20,11 +20,16 @@ package org.apache.kyuubi.engine.spark.operation
import org.apache.spark.sql.types.StructType
import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.session.Session
class SetCurrentDatabase(session: Session, database: String)
extends SparkOperation(session) {
+ private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def resultSchema: StructType = {
new StructType()
}
diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentCatalog.scala b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentCatalog.scala
index 3d8c7fd6c5b..504a53a4149 100644
--- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentCatalog.scala
+++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentCatalog.scala
@@ -23,11 +23,16 @@ import io.trino.client.ClientStandardTypes.VARCHAR
import io.trino.client.ClientTypeSignature.VARCHAR_UNBOUNDED_LENGTH
import org.apache.kyuubi.operation.IterableFetchIterator
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.session.Session
class GetCurrentCatalog(session: Session)
extends TrinoOperation(session) {
+ private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def runInternal(): Unit = {
try {
val session = trinoContext.clientSession.get
diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentDatabase.scala b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentDatabase.scala
index 3bf2987b46a..3ab598ef09e 100644
--- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentDatabase.scala
+++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/GetCurrentDatabase.scala
@@ -23,11 +23,16 @@ import io.trino.client.ClientStandardTypes.VARCHAR
import io.trino.client.ClientTypeSignature.VARCHAR_UNBOUNDED_LENGTH
import org.apache.kyuubi.operation.IterableFetchIterator
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.session.Session
class GetCurrentDatabase(session: Session)
extends TrinoOperation(session) {
+ private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def runInternal(): Unit = {
try {
val session = trinoContext.clientSession.get
diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentCatalog.scala b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentCatalog.scala
index 09ba4262f70..16836b0a97d 100644
--- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentCatalog.scala
+++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentCatalog.scala
@@ -19,11 +19,16 @@ package org.apache.kyuubi.engine.trino.operation
import io.trino.client.ClientSession
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.session.Session
class SetCurrentCatalog(session: Session, catalog: String)
extends TrinoOperation(session) {
+ private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def runInternal(): Unit = {
try {
val session = trinoContext.clientSession.get
diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentDatabase.scala b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentDatabase.scala
index f25cc9e0c6d..aa4697f5f0e 100644
--- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentDatabase.scala
+++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/operation/SetCurrentDatabase.scala
@@ -19,11 +19,16 @@ package org.apache.kyuubi.engine.trino.operation
import io.trino.client.ClientSession
+import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.session.Session
class SetCurrentDatabase(session: Session, database: String)
extends TrinoOperation(session) {
+ private val operationLog: OperationLog = OperationLog.createOperationLog(session, getHandle)
+
+ override def getOperationLog: Option[OperationLog] = Option(operationLog)
+
override protected def runInternal(): Unit = {
try {
val session = trinoContext.clientSession.get
diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/OperationLog.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/OperationLog.scala
index e6312d0fb84..3791c08d24c 100644
--- a/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/OperationLog.scala
+++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/operation/log/OperationLog.scala
@@ -195,6 +195,8 @@ class OperationLog(path: Path) {
}
def close(): Unit = synchronized {
+ if (!initialized) return
+
closeExtraReaders()
trySafely {
diff --git a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/log/OperationLogSuite.scala b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/log/OperationLogSuite.scala
index fe3cbc7fc75..b333b59fd4b 100644
--- a/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/log/OperationLogSuite.scala
+++ b/kyuubi-common/src/test/scala/org/apache/kyuubi/operation/log/OperationLogSuite.scala
@@ -297,4 +297,25 @@ class OperationLogSuite extends KyuubiFunSuite {
Utils.deleteDirectoryRecursively(extraFile.toFile)
}
}
+
+ test("Closing the unwritten operation log should not throw an exception") {
+ val sessionManager = new NoopSessionManager
+ sessionManager.initialize(KyuubiConf())
+ val sHandle = sessionManager.openSession(
+ TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V10,
+ "kyuubi",
+ "passwd",
+ "localhost",
+ Map.empty)
+ val session = sessionManager.getSession(sHandle)
+ OperationLog.createOperationLogRootDirectory(session)
+ val oHandle = OperationHandle()
+
+ val log = OperationLog.createOperationLog(session, oHandle)
+ val tRowSet = log.read(1)
+ assert(tRowSet == ThriftUtils.newEmptyRowSet)
+ // close the operation log without writing
+ log.close()
+ session.close()
+ }
}
From 7b94196ab1fb5d5cb636b20cd76f08dd41b2b2dc Mon Sep 17 00:00:00 2001
From: zwangsheng <2213335496@qq.com>
Date: Fri, 14 Apr 2023 16:52:54 +0800
Subject: [PATCH 038/404] [KYUUBI #4681][Engine] Set thread
`CreateSparkTimeoutChecker` daemon
### _Why are the changes needed?_
Close #4681
Set the `CreateSparkTimeoutChecker` thread in `SparkSQLEngine` as a daemon thread, so the JVM can exit when Spark session initialization fails.
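A minimal self-contained sketch of why the daemon flag matters (illustrative only, hypothetical names): the JVM exits only after all non-daemon threads have terminated, so a non-daemon watchdog would keep a failed engine process alive.
```scala
object DaemonDemo extends App {
  val watchdog = new Thread(
    () => while (true) Thread.sleep(1000), // never terminates on its own
    "watchdog")
  watchdog.setDaemon(true) // without this line, the JVM would hang forever
  watchdog.start()
  println("main done; JVM exits despite the running watchdog thread")
}
```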
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4682 from zwangsheng/KYUUBI_4681.
Closes #4681
1928a67ec [zwangsheng] Add thread name
57f1914e4 [zwangsheng] Add thread name
71ff31a2b [zwangsheng] revert
4e8a619b2 [zwangsheng] DEBUG
ea23fae11 [zwangsheng] Change Init Timeout => 10M
3a89acc64 [zwangsheng] fix comments
565d1c90a [zwangsheng] [KYUUBI #4681][Engine] Set thread daemon
Authored-by: zwangsheng <2213335496@qq.com>
Signed-off-by: Cheng Pan
---
.../org/apache/kyuubi/engine/spark/SparkSQLEngine.scala | 6 ++++--
.../kubernetes/test/spark/SparkOnKubernetesTestsSuite.scala | 1 +
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkSQLEngine.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkSQLEngine.scala
index 42e7c44a137..6f8aa2ec09f 100644
--- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkSQLEngine.scala
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/SparkSQLEngine.scala
@@ -359,7 +359,7 @@ object SparkSQLEngine extends Logging {
private def startInitTimeoutChecker(startTime: Long, timeout: Long): Unit = {
val mainThread = Thread.currentThread()
- new Thread(
+ val checker = new Thread(
() => {
while (System.currentTimeMillis() - startTime < timeout && !sparkSessionCreated.get()) {
Thread.sleep(500)
@@ -368,7 +368,9 @@ object SparkSQLEngine extends Logging {
mainThread.interrupt()
}
},
- "CreateSparkTimeoutChecker").start()
+ "CreateSparkTimeoutChecker")
+ checker.setDaemon(true)
+ checker.start()
}
private def isOnK8sClusterMode: Boolean = {
diff --git a/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/spark/SparkOnKubernetesTestsSuite.scala b/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/spark/SparkOnKubernetesTestsSuite.scala
index 5141ff4d7ea..74090bc40b0 100644
--- a/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/spark/SparkOnKubernetesTestsSuite.scala
+++ b/integration-tests/kyuubi-kubernetes-it/src/test/scala/org/apache/kyuubi/kubernetes/test/spark/SparkOnKubernetesTestsSuite.scala
@@ -57,6 +57,7 @@ abstract class SparkOnKubernetesSuiteBase
.set("spark.kubernetes.executor.request.cores", "250m")
.set("kyuubi.kubernetes.context", "minikube")
.set("kyuubi.frontend.protocols", "THRIFT_BINARY,REST")
+ .set("kyuubi.session.engine.initialize.timeout", "PT10M")
}
}
From db46b5b320ffc3e58f84a0c3bb0d113783b9612b Mon Sep 17 00:00:00 2001
From: Fu Chen
Date: Fri, 14 Apr 2023 20:46:28 +0800
Subject: [PATCH 039/404] [KYUUBI #4711] JDBC client should catch task failed
exception instead of NPE in the incremental mode
### _Why are the changes needed?_
Since the job is lazily submitted in incremental mode, the engine should not swallow the task failure exception even though the operation is already in a terminal state; the failure must be propagated to the JDBC client.
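A short sketch (assuming an active `SparkSession` named `spark`) of why the failure only surfaces at fetch time in incremental mode:
```scala
// toLocalIterator submits the job for each partition lazily, so a task
// failure is raised while the iterator is consumed (during FetchResults),
// not when the statement is executed.
val it = spark.sql("SELECT raise_error('boom')").toLocalIterator()
// No job has run yet; the statement itself appears to succeed.
it.hasNext // the job runs here and throws org.apache.spark.SparkException
```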
Before this PR:
```
0: jdbc:hive2://0.0.0.0:10009/> set kyuubi.operation.incremental.collect=true;
+---------------------------------------+--------+
| key | value |
+---------------------------------------+--------+
| kyuubi.operation.incremental.collect | true |
+---------------------------------------+--------+
0: jdbc:hive2://0.0.0.0:10009/> SELECT raise_error('custom error message');
Error: (state=,code=0)
0: jdbc:hive2://0.0.0.0:10009/>
```
Kyuubi server log:
```
2023-04-14 18:47:50.185 ERROR org.apache.kyuubi.server.KyuubiTBinaryFrontendService: Error fetching results:
java.lang.NullPointerException: null
at org.apache.kyuubi.server.BackendServiceMetric.$anonfun$fetchResults$1(BackendServiceMetric.scala:191) ~[classes/:?]
at org.apache.kyuubi.metrics.MetricsSystem$.timerTracing(MetricsSystem.scala:111) ~[classes/:?]
at org.apache.kyuubi.server.BackendServiceMetric.fetchResults(BackendServiceMetric.scala:187) ~[classes/:?]
at org.apache.kyuubi.server.BackendServiceMetric.fetchResults$(BackendServiceMetric.scala:182) ~[classes/:?]
at org.apache.kyuubi.server.KyuubiServer$$anon$1.fetchResults(KyuubiServer.scala:147) ~[classes/:?]
at org.apache.kyuubi.service.TFrontendService.FetchResults(TFrontendService.scala:530) [classes/:?]
```
After this PR:
```
0: jdbc:hive2://0.0.0.0:10009/> set kyuubi.operation.incremental.collect=true;
+---------------------------------------+--------+
| key | value |
+---------------------------------------+--------+
| kyuubi.operation.incremental.collect | true |
+---------------------------------------+--------+
0: jdbc:hive2://0.0.0.0:10009/> SELECT raise_error('custom error message');
Error: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 3.0 failed 1 times, most recent failure: Lost task 0.0 in stage 3.0 (TID 3) (0.0.0.0 executor driver): java.lang.RuntimeException: custom error message
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.project_doConsume_0$(Unknown Source)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:760)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:364)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:890)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:890)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:136)
at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:548)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1504)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:551)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2672)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2608)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2607)
at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2607)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1182)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1182)
at scala.Option.foreach(Option.scala:407)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1182)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2860)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2802)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2791)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:952)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2228)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2249)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2268)
at org.apache.spark.rdd.RDD.collectPartition$1(RDD.scala:1036)
at org.apache.spark.rdd.RDD.$anonfun$toLocalIterator$3(RDD.scala:1038)
at org.apache.spark.rdd.RDD.$anonfun$toLocalIterator$3$adapted(RDD.scala:1038)
at scala.collection.Iterator$$anon$11.nextCur(Iterator.scala:486)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:492)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:491)
at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:460)
at org.apache.kyuubi.operation.IterableFetchIterator.hasNext(FetchIterator.scala:97)
at scala.collection.Iterator$SliceIterator.hasNext(Iterator.scala:268)
at scala.collection.Iterator.toStream(Iterator.scala:1417)
at scala.collection.Iterator.toStream$(Iterator.scala:1416)
at scala.collection.AbstractIterator.toStream(Iterator.scala:1431)
at scala.collection.TraversableOnce.toSeq(TraversableOnce.scala:354)
at scala.collection.TraversableOnce.toSeq$(TraversableOnce.scala:354)
at scala.collection.AbstractIterator.toSeq(Iterator.scala:1431)
at org.apache.kyuubi.engine.spark.operation.SparkOperation.$anonfun$getNextRowSet$1(SparkOperation.scala:265)
at org.apache.kyuubi.engine.spark.operation.SparkOperation.$anonfun$withLocalProperties$1(SparkOperation.scala:155)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:169)
at org.apache.kyuubi.engine.spark.operation.SparkOperation.withLocalProperties(SparkOperation.scala:139)
at org.apache.kyuubi.engine.spark.operation.SparkOperation.getNextRowSet(SparkOperation.scala:243)
at org.apache.kyuubi.operation.OperationManager.getOperationNextRowSet(OperationManager.scala:141)
at org.apache.kyuubi.session.AbstractSession.fetchResults(AbstractSession.scala:240)
at org.apache.kyuubi.service.AbstractBackendService.fetchResults(AbstractBackendService.scala:214)
at org.apache.kyuubi.service.TFrontendService.FetchResults(TFrontendService.scala:530)
at org.apache.hive.service.rpc.thrift.TCLIService$Processor$FetchResults.getResult(TCLIService.java:1837)
at org.apache.hive.service.rpc.thrift.TCLIService$Processor$FetchResults.getResult(TCLIService.java:1822)
at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39)
at org.apache.kyuubi.service.authentication.TSetIpAddressProcessor.process(TSetIpAddressProcessor.scala:36)
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.RuntimeException: custom error message
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.project_doConsume_0$(Unknown Source)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:760)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:364)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:890)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:890)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:136)
at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:548)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1504)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:551)
... 3 more (state=,code=0)
0: jdbc:hive2://0.0.0.0:10009/>
```
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4711 from cfmcgrady/incremental-show-error-msg.
Closes #4711
66bb527ce [Fu Chen] JDBC client should catch task failed exception in the incremental mode
Authored-by: Fu Chen
Signed-off-by: Cheng Pan
---
.../engine/spark/operation/SparkOperation.scala | 5 +++--
.../KyuubiOperationPerConnectionSuite.scala | 12 +++++++++++-
2 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkOperation.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkOperation.scala
index eb58407d47c..cb7510a890b 100644
--- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkOperation.scala
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/operation/SparkOperation.scala
@@ -181,8 +181,9 @@ abstract class SparkOperation(session: Session)
setOperationException(ke)
throw ke
} else if (isTerminalState(state)) {
- setOperationException(KyuubiSQLException(errMsg))
- warn(s"Ignore exception in terminal state with $statementId: $errMsg")
+ val ke = KyuubiSQLException(errMsg)
+ setOperationException(ke)
+ throw ke
} else {
error(s"Error operating $opType: $errMsg", e)
val ke = KyuubiSQLException(s"Error operating $opType: $errMsg", e)
diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerConnectionSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerConnectionSuite.scala
index d04afbfb580..d0f1f065de8 100644
--- a/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerConnectionSuite.scala
+++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/operation/KyuubiOperationPerConnectionSuite.scala
@@ -31,7 +31,7 @@ import org.apache.kyuubi.config.{KyuubiConf, KyuubiReservedKeys}
import org.apache.kyuubi.config.KyuubiConf.SESSION_CONF_ADVISOR
import org.apache.kyuubi.engine.ApplicationState
import org.apache.kyuubi.jdbc.KyuubiHiveDriver
-import org.apache.kyuubi.jdbc.hive.KyuubiConnection
+import org.apache.kyuubi.jdbc.hive.{KyuubiConnection, KyuubiSQLException}
import org.apache.kyuubi.metrics.{MetricsConstants, MetricsSystem}
import org.apache.kyuubi.plugin.SessionConfAdvisor
import org.apache.kyuubi.session.{KyuubiSessionManager, SessionType}
@@ -281,6 +281,16 @@ class KyuubiOperationPerConnectionSuite extends WithKyuubiServer with HiveJDBCTe
assert(rs.getString(2) === KYUUBI_VERSION)
}
}
+
+ test("JDBC client should catch task failed exception in the incremental mode") {
+ withJdbcStatement() { statement =>
+ statement.executeQuery(s"set ${KyuubiConf.OPERATION_INCREMENTAL_COLLECT.key}=true;")
+ val resultSet = statement.executeQuery(
+ "SELECT raise_error('client should catch this exception');")
+ val e = intercept[KyuubiSQLException](resultSet.next())
+ assert(e.getMessage.contains("client should catch this exception"))
+ }
+ }
}
class TestSessionConfAdvisor extends SessionConfAdvisor {
From 93ba8f762f35e5467dbb6cd51ef4e82ba2f74d05 Mon Sep 17 00:00:00 2001
From: Anurag Rajawat
Date: Mon, 17 Apr 2023 09:14:20 +0800
Subject: [PATCH 040/404] [KYUUBI #4712] Bump Spark from 3.2.3 to 3.2.4
### _Why are the changes needed?_
Fixes #4712
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4718 from anurag-rajawat/upgrade-spark.
Closes #4712
79dcf1b79 [Anurag Rajawat] Bump Spark from 3.2.3 to 3.2.4
Authored-by: Anurag Rajawat
Signed-off-by: liangbowen
---
.github/workflows/master.yml | 2 +-
...plugin.spark.authz.serde.FunctionExtractor | 1 +
...in.spark.authz.serde.FunctionTypeExtractor | 1 +
.../src/main/resources/scan_command_spec.json | 29 ---
.../src/main/resources/scan_spec.json | 89 ++++++++
.../main/resources/table_command_spec.json | 16 +-
.../spark/authz/PrivilegesBuilder.scala | 23 ++
.../spark/authz/serde/CommandSpec.scala | 16 +-
.../authz/serde/functionExtractors.scala | 22 ++
.../authz/serde/functionTypeExtractors.scala | 36 +++-
.../plugin/spark/authz/serde/package.scala | 20 +-
.../FunctionPrivilegesBuilderSuite.scala | 196 ++++++++++++++++++
.../authz/gen/JsonSpecFileGenerator.scala | 9 +-
.../kyuubi/plugin/spark/authz/gen/Scans.scala | 28 ++-
.../spark/authz/gen/TableCommands.scala | 2 +-
pom.xml | 2 +-
16 files changed, 436 insertions(+), 56 deletions(-)
delete mode 100644 extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json
create mode 100644 extensions/spark/kyuubi-spark-authz/src/main/resources/scan_spec.json
create mode 100644 extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/FunctionPrivilegesBuilderSuite.scala
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index ae5b8188d82..ece87e26526 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -61,7 +61,7 @@ jobs:
comment: 'verify-on-spark-3.1-binary'
- java: 8
spark: '3.3'
- spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.2.3 -Dspark.archive.name=spark-3.2.3-bin-hadoop3.2.tgz'
+ spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.2.4 -Dspark.archive.name=spark-3.2.4-bin-hadoop3.2.tgz'
exclude-tags: '-Dmaven.plugin.scalatest.exclude.tags=org.scalatest.tags.Slow,org.apache.kyuubi.tags.DeltaTest,org.apache.kyuubi.tags.HudiTest,org.apache.kyuubi.tags.IcebergTest'
comment: 'verify-on-spark-3.2-binary'
env:
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor
index 4686bb033cf..2facb004a04 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor
+++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor
@@ -17,4 +17,5 @@
org.apache.kyuubi.plugin.spark.authz.serde.ExpressionInfoFunctionExtractor
org.apache.kyuubi.plugin.spark.authz.serde.FunctionIdentifierFunctionExtractor
+org.apache.kyuubi.plugin.spark.authz.serde.QualifiedNameStringFunctionExtractor
org.apache.kyuubi.plugin.spark.authz.serde.StringFunctionExtractor
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor
index 475f47afc24..3bb0ee6c23e 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor
+++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor
@@ -17,4 +17,5 @@
org.apache.kyuubi.plugin.spark.authz.serde.ExpressionInfoFunctionTypeExtractor
org.apache.kyuubi.plugin.spark.authz.serde.FunctionIdentifierFunctionTypeExtractor
+org.apache.kyuubi.plugin.spark.authz.serde.FunctionNameFunctionTypeExtractor
org.apache.kyuubi.plugin.spark.authz.serde.TempMarkerFunctionTypeExtractor
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json b/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json
deleted file mode 100644
index 9a6aef4ed98..00000000000
--- a/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json
+++ /dev/null
@@ -1,29 +0,0 @@
-[ {
- "classname" : "org.apache.kyuubi.plugin.spark.authz.util.PermanentViewMarker",
- "scanDescs" : [ {
- "fieldName" : "catalogTable",
- "fieldExtractor" : "CatalogTableTableExtractor",
- "catalogDesc" : null
- } ]
-}, {
- "classname" : "org.apache.spark.sql.catalyst.catalog.HiveTableRelation",
- "scanDescs" : [ {
- "fieldName" : "tableMeta",
- "fieldExtractor" : "CatalogTableTableExtractor",
- "catalogDesc" : null
- } ]
-}, {
- "classname" : "org.apache.spark.sql.execution.datasources.LogicalRelation",
- "scanDescs" : [ {
- "fieldName" : "catalogTable",
- "fieldExtractor" : "CatalogTableOptionTableExtractor",
- "catalogDesc" : null
- } ]
-}, {
- "classname" : "org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation",
- "scanDescs" : [ {
- "fieldName" : null,
- "fieldExtractor" : "DataSourceV2RelationTableExtractor",
- "catalogDesc" : null
- } ]
-} ]
\ No newline at end of file
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_spec.json b/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_spec.json
new file mode 100644
index 00000000000..3273ccbeaf0
--- /dev/null
+++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_spec.json
@@ -0,0 +1,89 @@
+[ {
+ "classname" : "org.apache.kyuubi.plugin.spark.authz.util.PermanentViewMarker",
+ "scanDescs" : [ {
+ "fieldName" : "catalogTable",
+ "fieldExtractor" : "CatalogTableTableExtractor",
+ "catalogDesc" : null
+ } ],
+ "functionDescs" : [ ]
+}, {
+ "classname" : "org.apache.spark.sql.catalyst.catalog.HiveTableRelation",
+ "scanDescs" : [ {
+ "fieldName" : "tableMeta",
+ "fieldExtractor" : "CatalogTableTableExtractor",
+ "catalogDesc" : null
+ } ],
+ "functionDescs" : [ ]
+}, {
+ "classname" : "org.apache.spark.sql.execution.datasources.LogicalRelation",
+ "scanDescs" : [ {
+ "fieldName" : "catalogTable",
+ "fieldExtractor" : "CatalogTableOptionTableExtractor",
+ "catalogDesc" : null
+ } ],
+ "functionDescs" : [ ]
+}, {
+ "classname" : "org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation",
+ "scanDescs" : [ {
+ "fieldName" : null,
+ "fieldExtractor" : "DataSourceV2RelationTableExtractor",
+ "catalogDesc" : null
+ } ],
+ "functionDescs" : [ ]
+}, {
+ "classname" : "org.apache.spark.sql.hive.HiveGenericUDF",
+ "scanDescs" : [ ],
+ "functionDescs" : [ {
+ "fieldName" : "name",
+ "fieldExtractor" : "QualifiedNameStringFunctionExtractor",
+ "databaseDesc" : null,
+ "functionTypeDesc" : {
+ "fieldName" : "name",
+ "fieldExtractor" : "FunctionNameFunctionTypeExtractor",
+ "skipTypes" : [ "TEMP", "SYSTEM" ]
+ },
+ "isInput" : true
+ } ]
+}, {
+ "classname" : "org.apache.spark.sql.hive.HiveGenericUDTF",
+ "scanDescs" : [ ],
+ "functionDescs" : [ {
+ "fieldName" : "name",
+ "fieldExtractor" : "QualifiedNameStringFunctionExtractor",
+ "databaseDesc" : null,
+ "functionTypeDesc" : {
+ "fieldName" : "name",
+ "fieldExtractor" : "FunctionNameFunctionTypeExtractor",
+ "skipTypes" : [ "TEMP", "SYSTEM" ]
+ },
+ "isInput" : true
+ } ]
+}, {
+ "classname" : "org.apache.spark.sql.hive.HiveSimpleUDF",
+ "scanDescs" : [ ],
+ "functionDescs" : [ {
+ "fieldName" : "name",
+ "fieldExtractor" : "QualifiedNameStringFunctionExtractor",
+ "databaseDesc" : null,
+ "functionTypeDesc" : {
+ "fieldName" : "name",
+ "fieldExtractor" : "FunctionNameFunctionTypeExtractor",
+ "skipTypes" : [ "TEMP", "SYSTEM" ]
+ },
+ "isInput" : true
+ } ]
+}, {
+ "classname" : "org.apache.spark.sql.hive.HiveUDAFFunction",
+ "scanDescs" : [ ],
+ "functionDescs" : [ {
+ "fieldName" : "name",
+ "fieldExtractor" : "QualifiedNameStringFunctionExtractor",
+ "databaseDesc" : null,
+ "functionTypeDesc" : {
+ "fieldName" : "name",
+ "fieldExtractor" : "FunctionNameFunctionTypeExtractor",
+ "skipTypes" : [ "TEMP", "SYSTEM" ]
+ },
+ "isInput" : true
+ } ]
+} ]
\ No newline at end of file
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json b/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json
index 81ccd8da085..3d6fcd93ba7 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json
+++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json
@@ -1243,14 +1243,6 @@
"fieldName" : "query",
"fieldExtractor" : "LogicalPlanQueryExtractor"
} ]
-}, {
- "classname" : "org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand",
- "tableDescs" : [ ],
- "opType" : "QUERY",
- "queryDescs" : [ {
- "fieldName" : "query",
- "fieldExtractor" : "LogicalPlanQueryExtractor"
- } ]
}, {
"classname" : "org.apache.spark.sql.execution.datasources.RefreshTable",
"tableDescs" : [ {
@@ -1293,6 +1285,14 @@
"fieldName" : "query",
"fieldExtractor" : "LogicalPlanQueryExtractor"
} ]
+}, {
+ "classname" : "org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand",
+ "tableDescs" : [ ],
+ "opType" : "QUERY",
+ "queryDescs" : [ {
+ "fieldName" : "query",
+ "fieldExtractor" : "LogicalPlanQueryExtractor"
+ } ]
}, {
"classname" : "org.apache.spark.sql.hive.execution.InsertIntoHiveTable",
"tableDescs" : [ {
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala
index b8220ea2732..98e4361894c 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala
@@ -235,4 +235,27 @@ object PrivilegesBuilder {
}
(inputObjs, outputObjs, opType)
}
+
+ /**
+ * Build input privilege objects from a Spark LogicalPlan for Hive permanent functions.
+ *
+ * Only input privileges are built; function references never produce outputs.
+ *
+ * @param plan A Spark LogicalPlan
+ */
+ def buildFunctionPrivileges(
+ plan: LogicalPlan,
+ spark: SparkSession): PrivilegesAndOpType = {
+ val inputObjs = new ArrayBuffer[PrivilegeObject]
+ plan transformAllExpressions {
+ case hiveFunction: Expression if isKnowFunction(hiveFunction) =>
+ val functionSpec: ScanSpec = getFunctionSpec(hiveFunction)
+ if (functionSpec.functionDescs.exists(!_.functionTypeDesc.get.skip(hiveFunction, spark))) {
+ functionSpec.functions(hiveFunction).foreach(func =>
+ inputObjs += PrivilegeObject(func))
+ }
+ hiveFunction
+ }
+ (inputObjs, Seq.empty, OperationType.QUERY)
+ }
}
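A hedged usage sketch of the new `buildFunctionPrivileges` entry point, assuming the authz plugin is on the classpath; `default.my_perm_udf` is a hypothetical permanent Hive UDF that must be registered beforehand:
```
import org.apache.spark.sql.SparkSession

import org.apache.kyuubi.plugin.spark.authz.PrivilegesBuilder

object FunctionPrivilegesSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").enableHiveSupport().getOrCreate()
    // default.my_perm_udf is hypothetical; only permanent Hive UDFs are reported
    val plan = spark.sql("SELECT default.my_perm_udf('x')").queryExecution.analyzed
    val (inputs, _, _) = PrivilegesBuilder.buildFunctionPrivileges(plan, spark)
    // one FUNCTION privilege object per permanent Hive UDF referenced by the plan
    inputs.foreach(po => println(s"${po.dbname}.${po.objectName}"))
    spark.stop()
  }
}
```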
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala
index e96ef8cbfd6..32ad30e211f 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala
@@ -19,6 +19,7 @@ package org.apache.kyuubi.plugin.spark.authz.serde
import com.fasterxml.jackson.annotation.JsonIgnore
import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.slf4j.LoggerFactory
@@ -94,7 +95,8 @@ case class TableCommandSpec(
case class ScanSpec(
classname: String,
- scanDescs: Seq[ScanDesc]) extends CommandSpec {
+ scanDescs: Seq[ScanDesc],
+ functionDescs: Seq[FunctionDesc] = Seq.empty) extends CommandSpec {
override def opType: String = OperationType.QUERY.toString
def tables: (LogicalPlan, SparkSession) => Seq[Table] = (plan, spark) => {
scanDescs.flatMap { td =>
@@ -107,4 +109,16 @@ case class ScanSpec(
}
}
}
+
+ def functions: (Expression) => Seq[Function] = (expr) => {
+ functionDescs.flatMap { fd =>
+ try {
+ Some(fd.extract(expr))
+ } catch {
+ case e: Exception =>
+ LOG.debug(fd.error(expr, e))
+ None
+ }
+ }
+ }
}
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala
index 894a6cb8f2f..72952120060 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala
@@ -20,12 +20,23 @@ package org.apache.kyuubi.plugin.spark.authz.serde
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.expressions.ExpressionInfo
+import org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor.buildFunctionIdentFromQualifiedName
+
trait FunctionExtractor extends (AnyRef => Function) with Extractor
object FunctionExtractor {
val functionExtractors: Map[String, FunctionExtractor] = {
loadExtractorsToMap[FunctionExtractor]
}
+
+ def buildFunctionIdentFromQualifiedName(qualifiedName: String): (String, Option[String]) = {
+ val parts: Array[String] = qualifiedName.split("\\.", 2)
+ if (parts.length == 1) {
+ (qualifiedName, None)
+ } else {
+ (parts.last, Some(parts.head))
+ }
+ }
}
/**
@@ -37,6 +48,17 @@ class StringFunctionExtractor extends FunctionExtractor {
}
}
+/**
+ * String
+ */
+class QualifiedNameStringFunctionExtractor extends FunctionExtractor {
+ override def apply(v1: AnyRef): Function = {
+ val qualifiedName: String = v1.asInstanceOf[String]
+ val (funcName, database) = buildFunctionIdentFromQualifiedName(qualifiedName)
+ Function(database, funcName)
+ }
+}
+
/**
* org.apache.spark.sql.catalyst.FunctionIdentifier
*/
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala
index 4c5e9dc8452..193a00fa584 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala
@@ -19,8 +19,11 @@ package org.apache.kyuubi.plugin.spark.authz.serde
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.FunctionIdentifier
+import org.apache.spark.sql.catalyst.catalog.SessionCatalog
+import org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor.buildFunctionIdentFromQualifiedName
import org.apache.kyuubi.plugin.spark.authz.serde.FunctionType.{FunctionType, PERMANENT, SYSTEM, TEMP}
+import org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor.getFunctionType
object FunctionType extends Enumeration {
type FunctionType = Value
@@ -33,6 +36,17 @@ object FunctionTypeExtractor {
val functionTypeExtractors: Map[String, FunctionTypeExtractor] = {
loadExtractorsToMap[FunctionTypeExtractor]
}
+
+ def getFunctionType(fi: FunctionIdentifier, catalog: SessionCatalog): FunctionType = {
+ fi match {
+ case permanent if catalog.isPersistentFunction(permanent) =>
+ PERMANENT
+ case system if catalog.isRegisteredFunction(system) =>
+ SYSTEM
+ case _ =>
+ TEMP
+ }
+ }
}
/**
@@ -66,14 +80,18 @@ class FunctionIdentifierFunctionTypeExtractor extends FunctionTypeExtractor {
override def apply(v1: AnyRef, spark: SparkSession): FunctionType = {
val catalog = spark.sessionState.catalog
val fi = v1.asInstanceOf[FunctionIdentifier]
- if (catalog.isTemporaryFunction(fi)) {
- TEMP
- } else if (catalog.isPersistentFunction(fi)) {
- PERMANENT
- } else if (catalog.isRegisteredFunction(fi)) {
- SYSTEM
- } else {
- TEMP
- }
+ getFunctionType(fi, catalog)
+ }
+}
+
+/**
+ * String
+ */
+class FunctionNameFunctionTypeExtractor extends FunctionTypeExtractor {
+ override def apply(v1: AnyRef, spark: SparkSession): FunctionType = {
+ val catalog: SessionCatalog = spark.sessionState.catalog
+ val qualifiedName: String = v1.asInstanceOf[String]
+ val (funcName, database) = buildFunctionIdentFromQualifiedName(qualifiedName)
+ getFunctionType(FunctionIdentifier(funcName, database), catalog)
}
}
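The qualified-name handling above is easy to misread, so here is a self-contained sketch mirroring `buildFunctionIdentFromQualifiedName`: it splits on the first dot only, making the database part optional:
```
object QualifiedNameSketch {
  // Mirrors FunctionExtractor.buildFunctionIdentFromQualifiedName in this patch.
  def split(qualifiedName: String): (String, Option[String]) = {
    val parts = qualifiedName.split("\\.", 2)
    if (parts.length == 1) (qualifiedName, None)
    else (parts.last, Some(parts.head))
  }

  def main(args: Array[String]): Unit = {
    assert(split("db1.my_udf") == ("my_udf", Some("db1")))
    assert(split("my_udf") == ("my_udf", None))
  }
}
```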
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala
index a52a558a00a..07f91a95d99 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala
@@ -66,9 +66,10 @@ package object serde {
}
final private lazy val SCAN_SPECS: Map[String, ScanSpec] = {
- val is = getClass.getClassLoader.getResourceAsStream("scan_command_spec.json")
+ val is = getClass.getClassLoader.getResourceAsStream("scan_spec.json")
mapper.readValue(is, new TypeReference[Array[ScanSpec]] {})
- .map(e => (e.classname, e)).toMap
+ .map(e => (e.classname, e))
+ .filter(t => t._2.scanDescs.nonEmpty).toMap
}
def isKnownScan(r: AnyRef): Boolean = {
@@ -79,6 +80,21 @@ package object serde {
SCAN_SPECS(r.getClass.getName)
}
+ final private lazy val FUNCTION_SPECS: Map[String, ScanSpec] = {
+ val is = getClass.getClassLoader.getResourceAsStream("scan_spec.json")
+ mapper.readValue(is, new TypeReference[Array[ScanSpec]] {})
+ .map(e => (e.classname, e))
+ .filter(t => t._2.functionDescs.nonEmpty).toMap
+ }
+
+ def isKnowFunction(r: AnyRef): Boolean = {
+ FUNCTION_SPECS.contains(r.getClass.getName)
+ }
+
+ def getFunctionSpec(r: AnyRef): ScanSpec = {
+ FUNCTION_SPECS(r.getClass.getName)
+ }
+
def operationType(plan: LogicalPlan): OperationType = {
val classname = plan.getClass.getName
TABLE_COMMAND_SPECS.get(classname)
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/FunctionPrivilegesBuilderSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/FunctionPrivilegesBuilderSuite.scala
new file mode 100644
index 00000000000..e8da4e87168
--- /dev/null
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/FunctionPrivilegesBuilderSuite.scala
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.plugin.spark.authz
+
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
+// scalastyle:off
+import org.scalatest.funsuite.AnyFunSuite
+
+import org.apache.kyuubi.plugin.spark.authz.OperationType.QUERY
+import org.apache.kyuubi.plugin.spark.authz.ranger.AccessType
+
+abstract class FunctionPrivilegesBuilderSuite extends AnyFunSuite
+ with SparkSessionProvider with BeforeAndAfterAll with BeforeAndAfterEach {
+ // scalastyle:on
+
+ protected def withTable(t: String)(f: String => Unit): Unit = {
+ try {
+ f(t)
+ } finally {
+ sql(s"DROP TABLE IF EXISTS $t")
+ }
+ }
+
+ protected def withDatabase(t: String)(f: String => Unit): Unit = {
+ try {
+ f(t)
+ } finally {
+ sql(s"DROP DATABASE IF EXISTS $t")
+ }
+ }
+
+ protected def checkColumns(plan: LogicalPlan, cols: Seq[String]): Unit = {
+ val (in, out, _) = PrivilegesBuilder.build(plan, spark)
+ assert(out.isEmpty, "Queries shall not check output privileges")
+ val po = in.head
+ assert(po.actionType === PrivilegeObjectActionType.OTHER)
+ assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
+ assert(po.columns === cols)
+ }
+
+ protected def checkColumns(query: String, cols: Seq[String]): Unit = {
+ checkColumns(sql(query).queryExecution.optimizedPlan, cols)
+ }
+
+ protected val reusedDb: String = getClass.getSimpleName
+ protected val reusedDb2: String = getClass.getSimpleName + "2"
+ protected val reusedTable: String = reusedDb + "." + getClass.getSimpleName
+ protected val reusedTableShort: String = reusedTable.split("\\.").last
+ protected val reusedPartTable: String = reusedTable + "_part"
+ protected val reusedPartTableShort: String = reusedPartTable.split("\\.").last
+ protected val functionCount = 3
+ protected val functionNamePrefix = "kyuubi_fun_"
+ protected val tempFunNamePrefix = "kyuubi_temp_fun_"
+
+ override def beforeAll(): Unit = {
+ sql(s"CREATE DATABASE IF NOT EXISTS $reusedDb")
+ sql(s"CREATE DATABASE IF NOT EXISTS $reusedDb2")
+ sql(s"CREATE TABLE IF NOT EXISTS $reusedTable" +
+ s" (key int, value string) USING parquet")
+ sql(s"CREATE TABLE IF NOT EXISTS $reusedPartTable" +
+ s" (key int, value string, pid string) USING parquet" +
+ s" PARTITIONED BY(pid)")
+ // scalastyle:off
+ (0 until functionCount).foreach { index =>
+ {
+ sql(s"CREATE FUNCTION ${reusedDb}.${functionNamePrefix}${index} AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFMaskHash'")
+ sql(s"CREATE FUNCTION ${reusedDb2}.${functionNamePrefix}${index} AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFMaskHash'")
+ sql(s"CREATE TEMPORARY FUNCTION ${tempFunNamePrefix}${index} AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFMaskHash'")
+ }
+ }
+ sql(s"USE ${reusedDb2}")
+ // scalastyle:on
+ super.beforeAll()
+ }
+
+ override def afterAll(): Unit = {
+ Seq(reusedTable, reusedPartTable).foreach { t =>
+ sql(s"DROP TABLE IF EXISTS $t")
+ }
+
+ Seq(reusedDb, reusedDb2).foreach { db =>
+ (0 until functionCount).foreach { index =>
+ sql(s"DROP FUNCTION ${db}.${functionNamePrefix}${index}")
+ }
+ sql(s"DROP DATABASE IF EXISTS ${db}")
+ }
+
+ spark.stop()
+ super.afterAll()
+ }
+}
+
+class HiveFunctionPrivilegesBuilderSuite extends FunctionPrivilegesBuilderSuite {
+
+ override protected val catalogImpl: String = "hive"
+
+ test("Function Call Query") {
+ val plan = sql(s"SELECT kyuubi_fun_1('data'), " +
+ s"kyuubi_fun_2(value), " +
+ s"${reusedDb}.kyuubi_fun_0(value), " +
+ s"kyuubi_temp_fun_1('data2')," +
+ s"kyuubi_temp_fun_2(key) " +
+ s"FROM $reusedTable").queryExecution.analyzed
+ val (inputs, _, _) = PrivilegesBuilder.buildFunctionPrivileges(plan, spark)
+ assert(inputs.size === 3)
+ inputs.foreach { po =>
+ assert(po.actionType === PrivilegeObjectActionType.OTHER)
+ assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION)
+ assert(po.dbname startsWith reusedDb.toLowerCase)
+ assert(po.objectName startsWith functionNamePrefix.toLowerCase)
+ val accessType = ranger.AccessType(po, QUERY, isInput = true)
+ assert(accessType === AccessType.SELECT)
+ }
+ }
+
+ test("Function Call Query with Quoted Name") {
+ val plan = sql(s"SELECT `kyuubi_fun_1`('data'), " +
+ s"`kyuubi_fun_2`(value), " +
+ s"`${reusedDb}`.`kyuubi_fun_0`(value), " +
+ s"`kyuubi_temp_fun_1`('data2')," +
+ s"`kyuubi_temp_fun_2`(key) " +
+ s"FROM $reusedTable").queryExecution.analyzed
+ val (inputs, _, _) = PrivilegesBuilder.buildFunctionPrivileges(plan, spark)
+ assert(inputs.size === 3)
+ inputs.foreach { po =>
+ assert(po.actionType === PrivilegeObjectActionType.OTHER)
+ assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION)
+ assert(po.dbname startsWith reusedDb.toLowerCase)
+ assert(po.objectName startsWith functionNamePrefix.toLowerCase)
+ val accessType = ranger.AccessType(po, QUERY, isInput = true)
+ assert(accessType === AccessType.SELECT)
+ }
+ }
+
+ test("Simple Function Call Query") {
+ val plan = sql(s"SELECT kyuubi_fun_1('data'), " +
+ s"kyuubi_fun_0('value'), " +
+ s"${reusedDb}.kyuubi_fun_0('value'), " +
+ s"${reusedDb}.kyuubi_fun_2('value'), " +
+ s"kyuubi_temp_fun_1('data2')," +
+ s"kyuubi_temp_fun_2('key') ").queryExecution.analyzed
+ val (inputs, _, _) = PrivilegesBuilder.buildFunctionPrivileges(plan, spark)
+ assert(inputs.size === 4)
+ inputs.foreach { po =>
+ assert(po.actionType === PrivilegeObjectActionType.OTHER)
+ assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION)
+ assert(po.dbname startsWith reusedDb.toLowerCase)
+ assert(po.objectName startsWith functionNamePrefix.toLowerCase)
+ val accessType = ranger.AccessType(po, QUERY, isInput = true)
+ assert(accessType === AccessType.SELECT)
+ }
+ }
+
+ test("Function Call In CAST Command") {
+ val table = "castTable"
+ withTable(table) { table =>
+ val plan = sql(s"CREATE TABLE ${table} " +
+ s"SELECT kyuubi_fun_1('data') col1, " +
+ s"${reusedDb2}.kyuubi_fun_2(value) col2, " +
+ s"kyuubi_fun_0(value) col3, " +
+ s"kyuubi_fun_2('value') col4, " +
+ s"${reusedDb}.kyuubi_fun_2('value') col5, " +
+ s"${reusedDb}.kyuubi_fun_1('value') col6, " +
+ s"kyuubi_temp_fun_1('data2') col7, " +
+ s"kyuubi_temp_fun_2(key) col8 " +
+ s"FROM ${reusedTable} WHERE ${reusedDb2}.kyuubi_fun_1(key)='123'").queryExecution.analyzed
+ val (inputs, _, _) = PrivilegesBuilder.buildFunctionPrivileges(plan, spark)
+ assert(inputs.size === 7)
+ inputs.foreach { po =>
+ assert(po.actionType === PrivilegeObjectActionType.OTHER)
+ assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION)
+ assert(po.dbname startsWith reusedDb.toLowerCase)
+ assert(po.objectName startsWith functionNamePrefix.toLowerCase)
+ val accessType = ranger.AccessType(po, QUERY, isInput = true)
+ assert(accessType === AccessType.SELECT)
+ }
+ }
+ }
+
+}
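Assuming the repository's existing scalatest-maven-plugin wiring, the new suite can presumably be run on its own with something like the following (flags per the Kyuubi testing docs; adjust as needed):
```
build/mvn test -pl extensions/spark/kyuubi-spark-authz \
  -Dtest=none -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.HiveFunctionPrivilegesBuilderSuite
```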
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala
index 7c7ed138b27..e20cd13d7b4 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala
@@ -34,13 +34,16 @@ object JsonSpecFileGenerator {
writeCommandSpecJson("database", DatabaseCommands.data)
writeCommandSpecJson("table", TableCommands.data ++ IcebergCommands.data)
writeCommandSpecJson("function", FunctionCommands.data)
- writeCommandSpecJson("scan", Scans.data)
+ writeCommandSpecJson("scan", Scans.data, isScanResource = true)
}
- def writeCommandSpecJson[T <: CommandSpec](commandType: String, specArr: Array[T]): Unit = {
+ def writeCommandSpecJson[T <: CommandSpec](
+ commandType: String,
+ specArr: Array[T],
+ isScanResource: Boolean = false): Unit = {
val pluginHome = getClass.getProtectionDomain.getCodeSource.getLocation.getPath
.split("target").head
- val filename = s"${commandType}_command_spec.json"
+ val filename = s"${commandType}${if (isScanResource) "" else "_command"}_spec.json"
val writer = {
val p = Paths.get(pluginHome, "src", "main", "resources", filename)
Files.newBufferedWriter(p, StandardCharsets.UTF_8)
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala
index 7bd8260bba5..b2c1868a26d 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala
@@ -18,6 +18,7 @@
package org.apache.kyuubi.plugin.spark.authz.gen
import org.apache.kyuubi.plugin.spark.authz.serde._
+import org.apache.kyuubi.plugin.spark.authz.serde.FunctionType._
object Scans {
@@ -57,9 +58,34 @@ object Scans {
ScanSpec(r, Seq(tableDesc))
}
+ val HiveSimpleUDF = {
+ ScanSpec(
+ "org.apache.spark.sql.hive.HiveSimpleUDF",
+ Seq.empty,
+ Seq(FunctionDesc(
+ "name",
+ classOf[QualifiedNameStringFunctionExtractor],
+ functionTypeDesc = Some(FunctionTypeDesc(
+ "name",
+ classOf[FunctionNameFunctionTypeExtractor],
+ Seq(TEMP, SYSTEM))),
+ isInput = true)))
+ }
+
+ val HiveGenericUDF = HiveSimpleUDF.copy(classname = "org.apache.spark.sql.hive.HiveGenericUDF")
+
+ val HiveUDAFFunction = HiveSimpleUDF.copy(classname =
+ "org.apache.spark.sql.hive.HiveUDAFFunction")
+
+ val HiveGenericUDTF = HiveSimpleUDF.copy(classname = "org.apache.spark.sql.hive.HiveGenericUDTF")
+
val data: Array[ScanSpec] = Array(
HiveTableRelation,
LogicalRelation,
DataSourceV2Relation,
- PermanentViewMarker)
+ PermanentViewMarker,
+ HiveSimpleUDF,
+ HiveGenericUDF,
+ HiveUDAFFunction,
+ HiveGenericUDTF)
}
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala
index 7bf01b43f89..4f971ba6201 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala
@@ -637,7 +637,7 @@ object TableCommands {
"org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand"),
InsertIntoHadoopFsRelationCommand,
InsertIntoDataSourceDir.copy(classname =
- "org.apache.spark.sql.execution.datasources.InsertIntoDataSourceDirCommand"),
+ "org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand"),
InsertIntoHiveTable,
LoadData,
MergeIntoTable,
diff --git a/pom.xml b/pom.xml
index f772bd1b437..520b181d52f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2337,7 +2337,7 @@
extensions/spark/kyuubi-extension-spark-3-2
- 3.2.3
+ 3.2.4
3.2
2.0.2
spark-${spark.version}-bin-hadoop3.2.tgz
From 3ac8df83243836623b20fbb3d9fadd5f14dd8560 Mon Sep 17 00:00:00 2001
From: liangbowen
Date: Mon, 17 Apr 2023 09:42:25 +0800
Subject: [PATCH 041/404] [KYUUBI #4695] Bump super-linter action from v4 to v5
### _Why are the changes needed?_
- super-linter action v5.0.0 release notes: https://github.com/github/super-linter/releases/tag/v5.0.0
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4695 from bowenliang123/superlinter-v5.
Closes #4695
7cd3095a9 [liangbowen] update
7826c8043 [liangbowen] update
89e7b3d7d [liangbowen] bump superlinter to v5
Authored-by: liangbowen
Signed-off-by: liangbowen
---
.github/workflows/style.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index e45b826fccc..78cbe655a30 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -109,7 +109,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Super Linter Checks
- uses: github/super-linter/slim@v4
+ uses: github/super-linter/slim@v5
env:
CREATE_LOG_FILE: true
ERROR_ON_MISSING_EXEC_BIT: true
From 57b06112657f1fd87447098e166205074426a908 Mon Sep 17 00:00:00 2001
From: huangzhir <306824224@qq.com>
Date: Mon, 17 Apr 2023 09:44:47 +0800
Subject: [PATCH 042/404] [KYUUBI #4713][TEST] Fix false positive result in
SchedulerPoolSuite
### _Why are the changes needed?_
fix issue https://github.com/apache/kyuubi/issues/4713
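In short: instead of asserting timing deltas inside `eventually`, the test now drains the executor with `awaitTermination` and only then compares the recorded start/finish times. A minimal sketch of that pattern, with hypothetical task bodies standing in for the real jobs:
```
import java.util.concurrent.Executors

import scala.concurrent.duration.SECONDS

object AwaitPoolSketch {
  def main(args: Array[String]): Unit = {
    val pool = Executors.newFixedThreadPool(2)
    var first, second = 0L
    pool.execute(() => { first = System.currentTimeMillis() })
    pool.execute(() => { Thread.sleep(1000); second = System.currentTimeMillis() })
    pool.shutdown()
    // block until both tasks finish (or 20s elapse), then assert deterministically
    pool.awaitTermination(20, SECONDS)
    assert(first < second)
  }
}
```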
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [X] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4714 from huangzhir/fixtest-schedulerpool.
Closes #4713
e66ede214 [huangzhir] fixbug TEST SchedulerPoolSuite a false positive result
Authored-by: huangzhir <306824224@qq.com>
Signed-off-by: Cheng Pan
---
.../engine/spark/SchedulerPoolSuite.scala | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala
index af8c90cf29e..d42b7f4d510 100644
--- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala
+++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala
@@ -19,6 +19,8 @@ package org.apache.kyuubi.engine.spark
import java.util.concurrent.Executors
+import scala.concurrent.duration.SECONDS
+
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd, SparkListenerJobStart}
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.SpanSugar.convertIntToGrainOfTime
@@ -80,6 +82,7 @@ class SchedulerPoolSuite extends WithSparkSQLEngine with HiveJDBCTestHelper {
threads.execute(() => {
priority match {
case 0 =>
+ // this job is named job2
withJdbcStatement() { statement =>
statement.execute("SET kyuubi.operation.scheduler.pool=p0")
statement.execute("SELECT java_method('java.lang.Thread', 'sleep', 1500l)" +
@@ -92,17 +95,18 @@ class SchedulerPoolSuite extends WithSparkSQLEngine with HiveJDBCTestHelper {
statement.execute("SELECT java_method('java.lang.Thread', 'sleep', 1500l)" +
" FROM range(1, 3, 1, 2)")
}
+ // make sure this job is named job1
+ Thread.sleep(1000)
}
})
}
threads.shutdown()
- eventually(Timeout(20.seconds)) {
- // We can not ensure that job1 is started before job2 so here using abs.
- assert(Math.abs(job1StartTime - job2StartTime) < 1000)
- // Job1 minShare is 2(total resource) so that job2 should be allocated tasks after
- // job1 finished.
- assert(job2FinishTime - job1FinishTime >= 1000)
- }
+ threads.awaitTermination(20, SECONDS)
+ // after job1 is submitted we sleep 1s, so job1 should start before job2
+ assert(job1StartTime < job2StartTime)
+ // job2's minShare is 2 (the total resource), so job1 should be allocated tasks only after
+ // job2 finishes.
+ assert(job2FinishTime < job1FinishTime)
} finally {
spark.sparkContext.removeSparkListener(listener)
}
From cdbe05fa4c6a18cd52185f3152b8648f676b240d Mon Sep 17 00:00:00 2001
From: Fu Chen
Date: Mon, 17 Apr 2023 16:52:54 +0800
Subject: [PATCH 043/404] [KYUUBI #4720] [ARROW] Fix
java.lang.NoSuchFieldError: IpcOption.DEFAULT for Spark-3.1/3.2
### _Why are the changes needed?_
`IpcOption.DEFAULT` was introduced in [ARROW-11081](https://github.com/apache/arrow/pull/9053) (Arrow 4.0.0); add `ARROW_IPC_OPTION_DEFAULT` to adapt to Spark 3.1/3.2
```
Caused by: java.lang.NoSuchFieldError: DEFAULT
at org.apache.spark.sql.execution.arrow.KyuubiArrowConverters$ArrowBatchIterator.$anonfun$next$1(KyuubiArrowConverters.scala:304)
at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1491)
at org.apache.spark.sql.execution.arrow.KyuubiArrowConverters$ArrowBatchIterator.next(KyuubiArrowConverters.scala:308)
at org.apache.spark.sql.execution.arrow.KyuubiArrowConverters$ArrowBatchIterator.next(KyuubiArrowConverters.scala:231)
at scala.collection.Iterator.foreach(Iterator.scala:943)
at scala.collection.Iterator.foreach$(Iterator.scala:943)
at org.apache.spark.sql.execution.arrow.KyuubiArrowConverters$ArrowBatchIterator.foreach(KyuubiArrowConverters.scala:231)
```
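The fix sidesteps the missing static field by constructing the options object directly, which behaves the same on Arrow 4+ and still links against the older Arrow that Spark 3.1/3.2 ship. A sketch of the shim, mirroring the patch:
```
import org.apache.arrow.vector.ipc.message.IpcOption

object ArrowIpcCompat {
  // Equivalent to IpcOption.DEFAULT on Arrow >= 4.0.0, but also compiles
  // and runs against older Arrow versions that lack the static field.
  final val ARROW_IPC_OPTION_DEFAULT = new IpcOption()
}
```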
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4720 from cfmcgrady/arrow-ipc-option.
Closes #4720
2c80e670e [Fu Chen] fix style
a8294f637 [Fu Chen] add ARROW_IPC_OPTION_DEFAULT
Authored-by: Fu Chen
Signed-off-by: Fu Chen
---
.../spark/sql/execution/arrow/KyuubiArrowConverters.scala | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala
index dd6163ec97c..2feadbced56 100644
--- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala
@@ -299,7 +299,7 @@ object KyuubiArrowConverters extends SQLConfHelper with Logging {
MessageSerializer.serialize(writeChannel, batch)
// Always write the Ipc options at the end.
- ArrowStreamWriter.writeEndOfStream(writeChannel, IpcOption.DEFAULT)
+ ArrowStreamWriter.writeEndOfStream(writeChannel, ARROW_IPC_OPTION_DEFAULT)
batch.close()
} {
@@ -318,4 +318,7 @@ object KyuubiArrowConverters extends SQLConfHelper with Logging {
context: TaskContext): Iterator[InternalRow] = {
ArrowConverters.fromBatchIterator(arrowBatchIter, schema, timeZoneId, context)
}
+
+ // IpcOption.DEFAULT was introduced in ARROW-11081 (Arrow 4.0.0); use this to adapt to Spark 3.1/3.2
+ final private val ARROW_IPC_OPTION_DEFAULT = new IpcOption()
}
From 17514a3acaf5e6b4c3a424e839e07854e674dd4c Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Mon, 17 Apr 2023 16:55:56 +0800
Subject: [PATCH 044/404] Revert "[KYUUBI #4713][TEST] Fix false positive
result in SchedulerPoolSuite"
This reverts commit 57b06112657f1fd87447098e166205074426a908.
---
.../engine/spark/SchedulerPoolSuite.scala | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala
index d42b7f4d510..af8c90cf29e 100644
--- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala
+++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala
@@ -19,8 +19,6 @@ package org.apache.kyuubi.engine.spark
import java.util.concurrent.Executors
-import scala.concurrent.duration.SECONDS
-
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd, SparkListenerJobStart}
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.SpanSugar.convertIntToGrainOfTime
@@ -82,7 +80,6 @@ class SchedulerPoolSuite extends WithSparkSQLEngine with HiveJDBCTestHelper {
threads.execute(() => {
priority match {
case 0 =>
- // this job is named job2
withJdbcStatement() { statement =>
statement.execute("SET kyuubi.operation.scheduler.pool=p0")
statement.execute("SELECT java_method('java.lang.Thread', 'sleep', 1500l)" +
@@ -95,18 +92,17 @@ class SchedulerPoolSuite extends WithSparkSQLEngine with HiveJDBCTestHelper {
statement.execute("SELECT java_method('java.lang.Thread', 'sleep', 1500l)" +
" FROM range(1, 3, 1, 2)")
}
- // make sure this job is named job1
- Thread.sleep(1000)
}
})
}
threads.shutdown()
- threads.awaitTermination(20, SECONDS)
- // after job1 is submitted we sleep 1s, so job1 should start before job2
- assert(job1StartTime < job2StartTime)
- // job2's minShare is 2 (the total resource), so job1 should be allocated tasks only after
- // job2 finishes.
- assert(job2FinishTime < job1FinishTime)
+ eventually(Timeout(20.seconds)) {
+ // We can not ensure that job1 is started before job2 so here using abs.
+ assert(Math.abs(job1StartTime - job2StartTime) < 1000)
+ // Job1 minShare is 2(total resource) so that job2 should be allocated tasks after
+ // job1 finished.
+ assert(job2FinishTime - job1FinishTime >= 1000)
+ }
} finally {
spark.sparkContext.removeSparkListener(listener)
}
From 553b2aafe7a29546ee410f44640d8e92cdf58951 Mon Sep 17 00:00:00 2001
From: Paul Lin
Date: Mon, 17 Apr 2023 20:11:22 +0800
Subject: [PATCH 045/404] [KYUUBI #4367] Support Flink 1.17
### _Why are the changes needed?_
Support Flink 1.17 and Flink SQL gateway.
1. Drop Flink 1.15
2. Migrate API to Flink SQL Gateway
3. Support Flink 1.17 (version-gated via reflection; see the sketch after this list)
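Concretely, the engine now picks a `DefaultContext` construction path per Flink minor version through reflection. A hedged, self-contained sketch of that dispatch (the returned strings merely describe the reflective paths the patch takes in `FlinkEngineUtils.getDefaultContext`):
```
import org.apache.flink.runtime.util.EnvironmentInformation

object FlinkVersionDispatchSketch {
  private def flinkMinorVersion: String =
    EnvironmentInformation.getVersion.split("\\.").take(2).mkString(".")

  def describeContextPath(): String = flinkMinorVersion match {
    case "1.16" => "DefaultContext(Configuration, List[CustomCommandLine]) constructor via reflection"
    case "1.17" => "static DefaultContext.load(...) factory via reflection"
    case v => throw new IllegalStateException(s"Flink $v is not supported")
  }
}
```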
### _How was this patch tested?_
- [x] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4368 from link3280/flink-1.17.
Closes #4367
8eb4da6c0 [Paul Lin] [KYUUBI #4367] Fix test failure
81a10f6be [Paul Lin] [KYUUBI #4367] Fix test failure
23d87ba1d [Paul Lin] [KYUUBI #4367] Rename delegation package to shim
5c9d0aa84 [Paul Lin] [KYUUBI #4367] Improve code style
56567fcd7 [Paul Lin] [KYUUBI #4367] Improve java.long.Long usage
417d37b27 [Paul Lin] Update externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkEngineUtils.scala
08f89991a [Paul Lin] [KYUUBI #4367] Fix ambiguous reference
ed950600c [Paul Lin] [KYUUBI #4367] Fix spotless
7b28eaf11 [Paul Lin] [KYUUBI #4367] Improve code style for iterations
c2a23d571 [Paul Lin] [KYUUBI #4367] Improve code style for error messages and iterations
7e36e70c7 [Paul Lin] [KYUUBI #4367] Improve code style for java.lang.Boolean
4ef8c5209 [Paul Lin] [KYUUBI #4367] Improve code style for java.util.*
8530aec2a [Paul Lin] [KYUUBI #4367] Remove unnecessary classes
1c41e4400 [Paul Lin] [KYUUBI #4367] Remove unnecessary variables
33eeb37ee [Paul Lin] [KYUUBI #4367] Remove unnecessary reflection code
e1e5cd2cf [Paul Lin] [KYUUBI #4367] Fix IncompatibleClassChangeError
3520a5153 [Paul Lin] [KYUUBI #4367] Fix IncompatibleClassChangeError
42cce7a54 [Paul Lin] [KYUUBI #4367] Replace vanilla reflection with kyuubi refection tools
20e9913e3 [Paul Lin] [KYUUBI #4367] Fix FlinkProcessBuilder test error
a02e01adf [Paul Lin] [KYUUBI #4367] Improve code style
20e1a559e [Paul Lin] [KYUUBI #4367] Use kyuubi refection tools
9b2072e45 [Paul Lin] [KYUUBI #4367] Improve flink version match
7ce1e9a12 [Paul Lin] [KYUUBI #4367] Fix local engine tagged as YARN app
fd0c88d15 [Paul Lin] Revert "[KYUUBI #4367] Filter out non kyuubi prefixed conf in flink login engine"
f71c6014e [Paul Lin] [KYUUBI #4367] Fix local engine tagged as YARN app
b7d46f57d [Paul Lin] [KYUUBI #4367] Filter out non kyuubi prefixed conf in flink login engine
47beb1a78 [Paul Lin] [KYUUBI #4367] Refactor Flink engine tests
7e1a198ca [Paul Lin] [KYUUBI #4367] Fix flink sql gateway jar not included in local mode
e851d9732 [Paul Lin] [KYUUBI #4367] Disable query id test for flink 1.16
7291e27fa [Paul Lin] [KYUUBI #4367] Remove profile for flink-1.15
54cfe3bbc [Paul Lin] [KYUUBI #4367] Fix udf not found in local flink engine tests
1a7833bf2 [Paul Lin] [KYUUBI #4367] Fix test failure in PlanOnlyStatementSuit
700ee04db [Paul Lin] [KYUUBI #4367] Fix FLINK_CONF_DIR not set in ut
b685ff139 [Paul Lin] [KYUUBI #4367] Improve code style
29728c042 [Paul Lin] [KYUUBI #4367] Fix Flink conf dir not found
799c93876 [Paul Lin] [KYUUBI #4367] Fix NoSuchFieldException
614ecc335 [Paul Lin] [KYUUBI #4367] Fix reflection failures
6a08d0bbe [Paul Lin] [KYUUBI #4367] Fix NPE in dependencies
d289495c0 [Paul Lin] [KYUUBI #4367] Flink FlinkSQLEngine capabilities with Flink 1.16
ef6f4d4ff [Paul Lin] [KYUUBI #4367] Remove support for Flink 1.15
e18b3c2ed [Paul Lin] [KYUUBI #4367] Fix Flink SessionManager compatibility issue
49e0a94be [Paul Lin] feat: Support Flink 1.17
Authored-by: Paul Lin
Signed-off-by: Cheng Pan
---
.github/workflows/master.yml | 8 +-
externals/kyuubi-flink-sql-engine/pom.xml | 6 +
.../engine/flink/FlinkEngineUtils.scala | 151 +++++++++----
.../engine/flink/FlinkSQLBackendService.scala | 2 +-
.../kyuubi/engine/flink/FlinkSQLEngine.scala | 55 +----
.../flink/operation/ExecuteStatement.scala | 203 +-----------------
.../flink/operation/FlinkOperation.scala | 13 +-
.../operation/FlinkSQLOperationManager.scala | 14 +-
.../engine/flink/operation/GetCatalogs.scala | 6 +-
.../engine/flink/operation/GetColumns.scala | 11 +-
.../flink/operation/GetCurrentCatalog.scala | 3 +-
.../flink/operation/GetCurrentDatabase.scala | 3 +-
.../engine/flink/operation/GetFunctions.scala | 18 +-
.../flink/operation/GetPrimaryKeys.scala | 19 +-
.../engine/flink/operation/GetSchemas.scala | 11 +-
.../engine/flink/operation/GetTables.scala | 6 +-
.../flink/operation/OperationUtils.scala | 172 ---------------
.../flink/operation/PlanOnlyStatement.scala | 28 +--
.../flink/operation/SetCurrentCatalog.scala | 4 +-
.../flink/operation/SetCurrentDatabase.scala | 4 +-
.../engine/flink/result/ResultSet.scala | 13 +-
.../engine/flink/result/ResultSetUtil.scala | 71 ++++++
.../session/FlinkSQLSessionManager.scala | 33 ++-
.../flink/session/FlinkSessionImpl.scala | 21 +-
.../engine/flink/shim/FlinkResultSet.scala | 78 +++++++
.../flink/shim/FlinkSessionManager.scala | 130 +++++++++++
.../flink/WithDiscoveryFlinkSQLEngine.scala | 26 +--
.../flink/WithFlinkSQLEngineLocal.scala | 190 +++++++++++++---
.../flink/WithFlinkSQLEngineOnYarn.scala | 2 +-
.../engine/flink/WithFlinkTestResources.scala | 7 +-
.../operation/FlinkOperationLocalSuite.scala | 30 ++-
.../operation/FlinkOperationOnYarnSuite.scala | 25 ++-
.../flink/operation/FlinkOperationSuite.scala | 61 ++++--
.../operation/PlanOnlyOperationSuite.scala | 22 +-
.../engine/KyuubiApplicationManager.scala | 2 +-
.../engine/flink/FlinkProcessBuilder.scala | 21 +-
.../flink/FlinkProcessBuilderSuite.scala | 5 +-
pom.xml | 16 +-
38 files changed, 844 insertions(+), 646 deletions(-)
delete mode 100644 externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/OperationUtils.scala
create mode 100644 externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkResultSet.scala
create mode 100644 externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkSessionManager.scala
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index ece87e26526..04ecb1a601a 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -157,15 +157,15 @@ jobs:
- 8
- 11
flink:
- - '1.15'
- '1.16'
+ - '1.17'
flink-archive: [ "" ]
comment: [ "normal" ]
include:
- java: 8
- flink: '1.16'
- flink-archive: '-Dflink.archive.mirror=https://archive.apache.org/dist/flink/flink-1.15.4 -Dflink.archive.name=flink-1.15.4-bin-scala_2.12.tgz'
- comment: 'verify-on-flink-1.15-binary'
+ flink: '1.17'
+ flink-archive: '-Dflink.archive.mirror=https://archive.apache.org/dist/flink/flink-1.16.1 -Dflink.archive.name=flink-1.16.1-bin-scala_2.12.tgz'
+ comment: 'verify-on-flink-1.16-binary'
steps:
- uses: actions/checkout@v3
- name: Tune Runner VM
diff --git a/externals/kyuubi-flink-sql-engine/pom.xml b/externals/kyuubi-flink-sql-engine/pom.xml
index 0e499f9785b..c73310a64cf 100644
--- a/externals/kyuubi-flink-sql-engine/pom.xml
+++ b/externals/kyuubi-flink-sql-engine/pom.xml
@@ -75,6 +75,12 @@
provided
+
+ org.apache.flink
+ flink-sql-gateway
+ provided
+
+
org.apache.flink
flink-table-common
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkEngineUtils.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkEngineUtils.scala
index 69fc8c69573..f9289ea8153 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkEngineUtils.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkEngineUtils.scala
@@ -18,29 +18,37 @@
package org.apache.kyuubi.engine.flink
import java.io.File
+import java.lang.{Boolean => JBoolean}
import java.net.URL
+import java.util.{ArrayList => JArrayList, Collections => JCollections, List => JList}
import scala.collection.JavaConverters._
+import scala.collection.convert.ImplicitConversions._
-import org.apache.commons.cli.{CommandLine, DefaultParser, Option, Options, ParseException}
+import org.apache.commons.cli.{CommandLine, DefaultParser, Options}
+import org.apache.flink.api.common.JobID
+import org.apache.flink.client.cli.{CustomCommandLine, DefaultCLI, GenericCLI}
+import org.apache.flink.configuration.Configuration
import org.apache.flink.core.fs.Path
import org.apache.flink.runtime.util.EnvironmentInformation
import org.apache.flink.table.client.SqlClientException
-import org.apache.flink.table.client.cli.CliOptions
+import org.apache.flink.table.client.cli.CliOptionsParser
import org.apache.flink.table.client.cli.CliOptionsParser._
-import org.apache.flink.table.client.gateway.context.SessionContext
-import org.apache.flink.table.client.gateway.local.LocalExecutor
+import org.apache.flink.table.gateway.service.context.{DefaultContext, SessionContext}
+import org.apache.flink.table.gateway.service.result.ResultFetcher
+import org.apache.flink.table.gateway.service.session.Session
+import org.apache.flink.util.JarUtils
-import org.apache.kyuubi.Logging
+import org.apache.kyuubi.{KyuubiException, Logging}
import org.apache.kyuubi.engine.SemanticVersion
+import org.apache.kyuubi.reflection.{DynConstructors, DynFields, DynMethods}
object FlinkEngineUtils extends Logging {
- val MODE_EMBEDDED = "embedded"
- val EMBEDDED_MODE_CLIENT_OPTIONS: Options = getEmbeddedModeClientOptions(new Options);
+ val EMBEDDED_MODE_CLIENT_OPTIONS: Options = getEmbeddedModeClientOptions(new Options)
val SUPPORTED_FLINK_VERSIONS: Array[SemanticVersion] =
- Array("1.15", "1.16").map(SemanticVersion.apply)
+ Array("1.16", "1.17").map(SemanticVersion.apply)
def checkFlinkVersion(): Unit = {
val flinkVersion = EnvironmentInformation.getVersion
@@ -62,47 +70,106 @@ object FlinkEngineUtils extends Logging {
def isFlinkVersionEqualTo(targetVersionString: String): Boolean =
SemanticVersion(EnvironmentInformation.getVersion).isVersionEqualTo(targetVersionString)
- def parseCliOptions(args: Array[String]): CliOptions = {
- val (mode, modeArgs) =
- if (args.isEmpty || args(0).startsWith("-")) (MODE_EMBEDDED, args)
- else (args(0), args.drop(1))
- val options = parseEmbeddedModeClient(modeArgs)
- if (mode == MODE_EMBEDDED) {
- if (options.isPrintHelp) {
- printHelpEmbeddedModeClient()
+ /**
+ * Copied and modified from [[org.apache.flink.table.client.cli.CliOptionsParser]]
+ * to avoid loading flink-python classes which we doesn't support yet.
+ */
+ private def discoverDependencies(
+ jars: JList[URL],
+ libraries: JList[URL]): JList[URL] = {
+ val dependencies: JList[URL] = new JArrayList[URL]
+ try { // find jar files
+ for (url <- jars) {
+ JarUtils.checkJarFile(url)
+ dependencies.add(url)
}
- options
+ // find jar files in library directories
+ libraries.foreach { libUrl =>
+ val dir: File = new File(libUrl.toURI)
+ if (!dir.isDirectory) throw new SqlClientException(s"Directory expected: $dir")
+ if (!dir.canRead) throw new SqlClientException(s"Directory cannot be read: $dir")
+ val files: Array[File] = dir.listFiles
+ if (files == null) throw new SqlClientException(s"Directory cannot be read: $dir")
+ files.filter { f => f.isFile && f.getAbsolutePath.toLowerCase.endsWith(".jar") }
+ .foreach { f =>
+ val url: URL = f.toURI.toURL
+ JarUtils.checkJarFile(url)
+ dependencies.add(url)
+ }
+ }
+ } catch {
+ case e: Exception =>
+ throw new SqlClientException("Could not load all required JAR files.", e)
+ }
+ dependencies
+ }
+
+ def getDefaultContext(
+ args: Array[String],
+ flinkConf: Configuration,
+ flinkConfDir: String): DefaultContext = {
+ val parser = new DefaultParser
+ val line = parser.parse(EMBEDDED_MODE_CLIENT_OPTIONS, args, true)
+ val jars: JList[URL] = Option(checkUrls(line, CliOptionsParser.OPTION_JAR))
+ .getOrElse(JCollections.emptyList())
+ val libDirs: JList[URL] = Option(checkUrls(line, CliOptionsParser.OPTION_LIBRARY))
+ .getOrElse(JCollections.emptyList())
+ val dependencies: JList[URL] = discoverDependencies(jars, libDirs)
+ if (FlinkEngineUtils.isFlinkVersionEqualTo("1.16")) {
+ val commandLines: JList[CustomCommandLine] =
+ Seq(new GenericCLI(flinkConf, flinkConfDir), new DefaultCLI).asJava
+ DynConstructors.builder()
+ .impl(
+ classOf[DefaultContext],
+ classOf[Configuration],
+ classOf[JList[CustomCommandLine]])
+ .build()
+ .newInstance(flinkConf, commandLines)
+ .asInstanceOf[DefaultContext]
+ } else if (FlinkEngineUtils.isFlinkVersionEqualTo("1.17")) {
+ DynMethods.builder("load")
+ .impl(
+ classOf[DefaultContext],
+ classOf[Configuration],
+ classOf[JList[URL]],
+ classOf[Boolean],
+ classOf[Boolean])
+ .buildStatic()
+ .invoke[DefaultContext](
+ flinkConf,
+ dependencies,
+ new JBoolean(true),
+ new JBoolean(false))
} else {
- throw new SqlClientException("Other mode is not supported yet.")
+ throw new KyuubiException(
+ s"Flink version ${EnvironmentInformation.getVersion} are not supported currently.")
}
}
- def getSessionContext(localExecutor: LocalExecutor, sessionId: String): SessionContext = {
- val method = classOf[LocalExecutor].getDeclaredMethod("getSessionContext", classOf[String])
- method.setAccessible(true)
- method.invoke(localExecutor, sessionId).asInstanceOf[SessionContext]
+ def getSessionContext(session: Session): SessionContext = {
+ DynFields.builder()
+ .hiddenImpl(classOf[Session], "sessionContext")
+ .build()
+ .get(session)
+ .asInstanceOf[SessionContext]
}
- def parseEmbeddedModeClient(args: Array[String]): CliOptions =
+ def getResultJobId(resultFetch: ResultFetcher): Option[JobID] = {
+ if (FlinkEngineUtils.isFlinkVersionAtMost("1.16")) {
+ return None
+ }
try {
- val parser = new DefaultParser
- val line = parser.parse(EMBEDDED_MODE_CLIENT_OPTIONS, args, true)
- val jarUrls = checkUrls(line, OPTION_JAR)
- val libraryUrls = checkUrls(line, OPTION_LIBRARY)
- new CliOptions(
- line.hasOption(OPTION_HELP.getOpt),
- checkSessionId(line),
- checkUrl(line, OPTION_INIT_FILE),
- checkUrl(line, OPTION_FILE),
- if (jarUrls != null && jarUrls.nonEmpty) jarUrls.asJava else null,
- if (libraryUrls != null && libraryUrls.nonEmpty) libraryUrls.asJava else null,
- line.getOptionValue(OPTION_UPDATE.getOpt),
- line.getOptionValue(OPTION_HISTORY.getOpt),
- null)
+ Option(DynFields.builder()
+ .hiddenImpl(classOf[ResultFetcher], "jobID")
+ .build()
+ .get(resultFetch)
+ .asInstanceOf[JobID])
} catch {
- case e: ParseException =>
- throw new SqlClientException(e.getMessage)
+ case _: NullPointerException => None
+ case e: Throwable =>
+ throw new IllegalStateException("Unexpected error occurred while fetching query ID", e)
}
+ }
def checkSessionId(line: CommandLine): String = {
val sessionId = line.getOptionValue(OPTION_SESSION.getOpt)
@@ -111,13 +178,13 @@ object FlinkEngineUtils extends Logging {
} else sessionId
}
- def checkUrl(line: CommandLine, option: Option): URL = {
- val urls: List[URL] = checkUrls(line, option)
+ def checkUrl(line: CommandLine, option: org.apache.commons.cli.Option): URL = {
+ val urls: JList[URL] = checkUrls(line, option)
if (urls != null && urls.nonEmpty) urls.head
else null
}
- def checkUrls(line: CommandLine, option: Option): List[URL] = {
+ def checkUrls(line: CommandLine, option: org.apache.commons.cli.Option): JList[URL] = {
if (line.hasOption(option.getOpt)) {
line.getOptionValues(option.getOpt).distinct.map((url: String) => {
checkFilePath(url)
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLBackendService.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLBackendService.scala
index d049e3c80bf..9802f195546 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLBackendService.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLBackendService.scala
@@ -17,7 +17,7 @@
package org.apache.kyuubi.engine.flink
-import org.apache.flink.table.client.gateway.context.DefaultContext
+import org.apache.flink.table.gateway.service.context.DefaultContext
import org.apache.kyuubi.engine.flink.session.FlinkSQLSessionManager
import org.apache.kyuubi.service.AbstractBackendService
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala
index 42061a36959..48a354b0f95 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/FlinkSQLEngine.scala
@@ -18,22 +18,17 @@
package org.apache.kyuubi.engine.flink
import java.io.File
-import java.net.URL
import java.nio.file.Paths
import java.time.Instant
import java.util.concurrent.CountDownLatch
import scala.collection.JavaConverters._
-import scala.collection.mutable.ListBuffer
-import org.apache.flink.client.cli.{DefaultCLI, GenericCLI}
import org.apache.flink.configuration.{Configuration, DeploymentOptions, GlobalConfiguration}
import org.apache.flink.table.api.TableEnvironment
-import org.apache.flink.table.client.SqlClientException
-import org.apache.flink.table.client.gateway.context.DefaultContext
-import org.apache.flink.util.JarUtils
+import org.apache.flink.table.gateway.service.context.DefaultContext
-import org.apache.kyuubi.{KyuubiSQLException, Logging, Utils}
+import org.apache.kyuubi.{Logging, Utils}
import org.apache.kyuubi.Utils.{addShutdownHook, currentUser, FLINK_ENGINE_SHUTDOWN_PRIORITY}
import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.engine.flink.FlinkSQLEngine.{countDownLatch, currentEngine}
@@ -118,18 +113,9 @@ object FlinkSQLEngine extends Logging {
debug(s"Skip generating app name for execution target $other")
}
- val cliOptions = FlinkEngineUtils.parseCliOptions(args)
- val jars = if (cliOptions.getJars != null) cliOptions.getJars.asScala else List.empty
- val libDirs =
- if (cliOptions.getLibraryDirs != null) cliOptions.getLibraryDirs.asScala else List.empty
- val dependencies = discoverDependencies(jars, libDirs)
- val engineContext = new DefaultContext(
- dependencies.asJava,
- flinkConf,
- Seq(new GenericCLI(flinkConf, flinkConfDir), new DefaultCLI).asJava)
-
kyuubiConf.setIfMissing(KyuubiConf.FRONTEND_THRIFT_BINARY_BIND_PORT, 0)
+ val engineContext = FlinkEngineUtils.getDefaultContext(args, flinkConf, flinkConfDir)
startEngine(engineContext)
info("Flink engine started")
@@ -146,7 +132,7 @@ object FlinkSQLEngine extends Logging {
engine.stop()
}
case t: Throwable =>
- error("Create FlinkSQL Engine Failed", t)
+ error("Failed to create FlinkSQL Engine", t)
}
}
@@ -167,37 +153,4 @@ object FlinkSQLEngine extends Logging {
res.await()
info("Initial Flink SQL finished.")
}
-
- private def discoverDependencies(
- jars: Seq[URL],
- libraries: Seq[URL]): List[URL] = {
- try {
- var dependencies: ListBuffer[URL] = ListBuffer()
- // find jar files
- jars.foreach { url =>
- JarUtils.checkJarFile(url)
- dependencies = dependencies += url
- }
- // find jar files in library directories
- libraries.foreach { libUrl =>
- val dir: File = new File(libUrl.toURI)
- if (!dir.isDirectory) throw new SqlClientException("Directory expected: " + dir)
- else if (!dir.canRead) throw new SqlClientException("Directory cannot be read: " + dir)
- val files: Array[File] = dir.listFiles
- if (files == null) throw new SqlClientException("Directory cannot be read: " + dir)
- files.foreach { f =>
- // only consider jars
- if (f.isFile && f.getAbsolutePath.toLowerCase.endsWith(".jar")) {
- val url: URL = f.toURI.toURL
- JarUtils.checkJarFile(url)
- dependencies = dependencies += url
- }
- }
- }
- dependencies.toList
- } catch {
- case e: Exception =>
- throw KyuubiSQLException(s"Could not load all required JAR files.", e)
- }
- }
}
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala
index 10ad5bf6d3a..4042756b623 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/ExecuteStatement.scala
@@ -17,32 +17,15 @@
package org.apache.kyuubi.engine.flink.operation
-import java.time.{LocalDate, LocalTime}
-import java.util
-
-import scala.collection.JavaConverters._
-import scala.collection.mutable.ArrayBuffer
-
import org.apache.flink.api.common.JobID
-import org.apache.flink.table.api.ResultKind
-import org.apache.flink.table.client.gateway.TypedResult
-import org.apache.flink.table.data.{GenericArrayData, GenericMapData, RowData}
-import org.apache.flink.table.data.binary.{BinaryArrayData, BinaryMapData}
-import org.apache.flink.table.operations.{ModifyOperation, Operation, QueryOperation}
-import org.apache.flink.table.operations.command._
-import org.apache.flink.table.types.DataType
-import org.apache.flink.table.types.logical._
-import org.apache.flink.types.Row
+import org.apache.flink.table.gateway.api.operation.OperationHandle
import org.apache.kyuubi.Logging
-import org.apache.kyuubi.engine.flink.FlinkEngineUtils._
-import org.apache.kyuubi.engine.flink.result.ResultSet
-import org.apache.kyuubi.engine.flink.schema.RowSet.toHiveString
+import org.apache.kyuubi.engine.flink.FlinkEngineUtils
+import org.apache.kyuubi.engine.flink.result.ResultSetUtil
import org.apache.kyuubi.operation.OperationState
import org.apache.kyuubi.operation.log.OperationLog
-import org.apache.kyuubi.reflection.DynMethods
import org.apache.kyuubi.session.Session
-import org.apache.kyuubi.util.RowSetUtils
class ExecuteStatement(
session: Session,
@@ -77,22 +60,11 @@ class ExecuteStatement(
private def executeStatement(): Unit = {
try {
setState(OperationState.RUNNING)
- val operation = executor.parseStatement(sessionId, statement)
- operation match {
- case queryOperation: QueryOperation => runQueryOperation(queryOperation)
- case modifyOperation: ModifyOperation => runModifyOperation(modifyOperation)
- case setOperation: SetOperation =>
- resultSet = OperationUtils.runSetOperation(setOperation, executor, sessionId)
- case resetOperation: ResetOperation =>
- resultSet = OperationUtils.runResetOperation(resetOperation, executor, sessionId)
- case addJarOperation: AddJarOperation if isFlinkVersionAtMost("1.15") =>
- resultSet = OperationUtils.runAddJarOperation(addJarOperation, executor, sessionId)
- case removeJarOperation: RemoveJarOperation =>
- resultSet = OperationUtils.runRemoveJarOperation(removeJarOperation, executor, sessionId)
- case showJarsOperation: ShowJarsOperation if isFlinkVersionAtMost("1.15") =>
- resultSet = OperationUtils.runShowJarOperation(showJarsOperation, executor, sessionId)
- case operation: Operation => runOperation(operation)
- }
+ val resultFetcher = executor.executeStatement(
+ new OperationHandle(getHandle.identifier),
+ statement)
+ jobId = FlinkEngineUtils.getResultJobId(resultFetcher)
+ resultSet = ResultSetUtil.fromResultFetcher(resultFetcher, resultMaxRows)
setState(OperationState.FINISHED)
} catch {
onError(cancel = true)
@@ -100,163 +72,4 @@ class ExecuteStatement(
shutdownTimeoutMonitor()
}
}
-
- private def runQueryOperation(operation: QueryOperation): Unit = {
- var resultId: String = null
- try {
- val resultDescriptor = executor.executeQuery(sessionId, operation)
- val dataTypes = resultDescriptor.getResultSchema.getColumnDataTypes.asScala.toList
-
- resultId = resultDescriptor.getResultId
-
- val rows = new ArrayBuffer[Row]()
- var loop = true
-
- while (loop) {
- Thread.sleep(50) // slow the processing down
-
- val pageSize = Math.min(500, resultMaxRows)
- val result = executor.snapshotResult(sessionId, resultId, pageSize)
- result.getType match {
- case TypedResult.ResultType.PAYLOAD =>
- (1 to result.getPayload).foreach { page =>
- if (rows.size < resultMaxRows) {
- val result = executor.retrieveResultPage(resultId, page)
- rows ++= result.asScala.map(r => convertToRow(r, dataTypes))
- } else {
- loop = false
- }
- }
- case TypedResult.ResultType.EOS => loop = false
- case TypedResult.ResultType.EMPTY =>
- }
- }
-
- resultSet = ResultSet.builder
- .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
- .columns(resultDescriptor.getResultSchema.getColumns)
- .data(rows.slice(0, resultMaxRows).toArray[Row])
- .build
- } finally {
- if (resultId != null) {
- cleanupQueryResult(resultId)
- }
- }
- }
-
- private def runModifyOperation(operation: ModifyOperation): Unit = {
- val result = executor.executeOperation(sessionId, operation)
- jobId = result.getJobClient.asScala.map(_.getJobID)
- resultSet = ResultSet.fromJobId(jobId.orNull)
- }
-
- private def runOperation(operation: Operation): Unit = {
- val result = executor.executeOperation(sessionId, operation)
- jobId = result.getJobClient.asScala.map(_.getJobID)
- // after FLINK-24461, TableResult#await() would block insert statements
- // until the job finishes, instead of returning row affected immediately
- resultSet = ResultSet.fromTableResult(result)
- }
-
- private def cleanupQueryResult(resultId: String): Unit = {
- try {
- executor.cancelQuery(sessionId, resultId)
- } catch {
- case t: Throwable =>
- warn(s"Failed to clean result set $resultId in session $sessionId", t)
- }
- }
-
- private[this] def convertToRow(r: RowData, dataTypes: List[DataType]): Row = {
- val row = Row.withPositions(r.getRowKind, r.getArity)
- for (i <- 0 until r.getArity) {
- val dataType = dataTypes(i)
- dataType.getLogicalType match {
- case arrayType: ArrayType =>
- val arrayData = r.getArray(i)
- if (arrayData == null) {
- row.setField(i, null)
- }
- arrayData match {
- case d: GenericArrayData =>
- row.setField(i, d.toObjectArray)
- case d: BinaryArrayData =>
- row.setField(i, d.toObjectArray(arrayType.getElementType))
- case _ =>
- }
- case _: BinaryType | _: VarBinaryType =>
- row.setField(i, r.getBinary(i))
- case _: BigIntType =>
- row.setField(i, r.getLong(i))
- case _: BooleanType =>
- row.setField(i, r.getBoolean(i))
- case _: VarCharType | _: CharType =>
- row.setField(i, r.getString(i))
- case t: DecimalType =>
- row.setField(i, r.getDecimal(i, t.getPrecision, t.getScale).toBigDecimal)
- case _: DateType =>
- val date = RowSetUtils.formatLocalDate(LocalDate.ofEpochDay(r.getInt(i)))
- row.setField(i, date)
- case _: TimeType =>
- val time = RowSetUtils.formatLocalTime(LocalTime.ofNanoOfDay(r.getLong(i) * 1000 * 1000))
- row.setField(i, time)
- case t: TimestampType =>
- val ts = RowSetUtils
- .formatLocalDateTime(r.getTimestamp(i, t.getPrecision)
- .toLocalDateTime)
- row.setField(i, ts)
- case _: TinyIntType =>
- row.setField(i, r.getByte(i))
- case _: SmallIntType =>
- row.setField(i, r.getShort(i))
- case _: IntType =>
- row.setField(i, r.getInt(i))
- case _: FloatType =>
- row.setField(i, r.getFloat(i))
- case mapType: MapType =>
- val mapData = r.getMap(i)
- if (mapData != null && mapData.size > 0) {
- val keyType = mapType.getKeyType
- val valueType = mapType.getValueType
- mapData match {
- case d: BinaryMapData =>
- val kvArray = toArray(keyType, valueType, d)
- val map: util.Map[Any, Any] = new util.HashMap[Any, Any]
- for (i <- kvArray._1.indices) {
- val value: Any = kvArray._2(i)
- map.put(kvArray._1(i), value)
- }
- row.setField(i, map)
- case d: GenericMapData => // TODO
- }
- } else {
- row.setField(i, null)
- }
- case _: DoubleType =>
- row.setField(i, r.getDouble(i))
- case t: RowType =>
- val fieldDataTypes = DynMethods.builder("getFieldDataTypes")
- .impl(classOf[DataType], classOf[DataType])
- .buildStatic
- .invoke[util.List[DataType]](dataType)
- .asScala.toList
- val internalRowData = r.getRow(i, t.getFieldCount)
- val internalRow = convertToRow(internalRowData, fieldDataTypes)
- row.setField(i, internalRow)
- case t =>
- val hiveString = toHiveString((row.getField(i), t))
- row.setField(i, hiveString)
- }
- }
- row
- }
-
- private[this] def toArray(
- keyType: LogicalType,
- valueType: LogicalType,
- arrayData: BinaryMapData): (Array[_], Array[_]) = {
-
- arrayData.keyArray().toObjectArray(keyType) -> arrayData.valueArray().toObjectArray(valueType)
- }
-
}
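Editor's note: the dispatch-by-operation-type logic removed above collapses into a single SQL Gateway call, where the executor parses and runs the statement itself and hands back a ResultFetcher. A minimal sketch of that path, assuming a Flink 1.16+ OperationExecutor is already at hand (runStatement is a hypothetical helper, not part of this patch):

```scala
import java.util.UUID

import org.apache.flink.table.gateway.api.operation.OperationHandle
import org.apache.flink.table.gateway.service.operation.OperationExecutor
import org.apache.flink.table.gateway.service.result.ResultFetcher

object GatewayExecutionSketch {
  // One handle per statement; the gateway uses it to track and cancel the operation.
  def runStatement(executor: OperationExecutor, statement: String): ResultFetcher =
    executor.executeStatement(new OperationHandle(UUID.randomUUID()), statement)
}
```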
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperation.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperation.scala
index 2859d659e62..d734cea0550 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperation.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperation.scala
@@ -21,8 +21,9 @@ import java.io.IOException
import scala.collection.JavaConverters.collectionAsScalaIterableConverter
-import org.apache.flink.table.client.gateway.Executor
-import org.apache.flink.table.client.gateway.context.SessionContext
+import org.apache.flink.configuration.Configuration
+import org.apache.flink.table.gateway.service.context.SessionContext
+import org.apache.flink.table.gateway.service.operation.OperationExecutor
import org.apache.hive.service.rpc.thrift.{TGetResultSetMetadataResp, TRowSet, TTableSchema}
import org.apache.kyuubi.{KyuubiSQLException, Utils}
@@ -36,12 +37,16 @@ import org.apache.kyuubi.session.Session
abstract class FlinkOperation(session: Session) extends AbstractOperation(session) {
+ protected val flinkSession: org.apache.flink.table.gateway.service.session.Session =
+ session.asInstanceOf[FlinkSessionImpl].fSession
+
+ protected val executor: OperationExecutor = flinkSession.createExecutor(
+ Configuration.fromMap(flinkSession.getSessionConfig))
+
protected val sessionContext: SessionContext = {
session.asInstanceOf[FlinkSessionImpl].sessionContext
}
- protected val executor: Executor = session.asInstanceOf[FlinkSessionImpl].executor
-
protected val sessionId: String = session.handle.identifier.toString
protected var resultSet: ResultSet = _
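Editor's note: the new executor field above is derived from the gateway Session in two steps: the session's config is exposed as a plain string map, rehydrated into a Flink Configuration, and passed to createExecutor. A standalone sketch of that derivation, illustrative rather than a stable public API:

```scala
import org.apache.flink.configuration.Configuration
import org.apache.flink.table.gateway.service.operation.OperationExecutor
import org.apache.flink.table.gateway.service.session.{Session => FlinkSession}

object ExecutorSketch {
  // Each executor snapshots the session config at creation time.
  def executorFor(flinkSession: FlinkSession): OperationExecutor =
    flinkSession.createExecutor(Configuration.fromMap(flinkSession.getSessionConfig))
}
```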
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkSQLOperationManager.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkSQLOperationManager.scala
index d7b5e297d1a..712c13596cb 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkSQLOperationManager.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/FlinkSQLOperationManager.scala
@@ -23,6 +23,7 @@ import scala.collection.JavaConverters._
import org.apache.kyuubi.KyuubiSQLException
import org.apache.kyuubi.config.KyuubiConf._
+import org.apache.kyuubi.engine.flink.FlinkEngineUtils
import org.apache.kyuubi.engine.flink.result.Constants
import org.apache.kyuubi.engine.flink.session.FlinkSessionImpl
import org.apache.kyuubi.operation.{NoneMode, Operation, OperationManager, PlanOnlyMode}
@@ -44,7 +45,8 @@ class FlinkSQLOperationManager extends OperationManager("FlinkSQLOperationManage
runAsync: Boolean,
queryTimeout: Long): Operation = {
val flinkSession = session.asInstanceOf[FlinkSessionImpl]
- if (flinkSession.sessionContext.getConfigMap.getOrDefault(
+ val sessionConfig = flinkSession.fSession.getSessionConfig
+ if (sessionConfig.getOrDefault(
ENGINE_OPERATION_CONVERT_CATALOG_DATABASE_ENABLED.key,
operationConvertCatalogDatabaseDefault.toString).toBoolean) {
val catalogDatabaseOperation = processCatalogDatabase(session, statement, confOverlay)
@@ -53,11 +55,13 @@ class FlinkSQLOperationManager extends OperationManager("FlinkSQLOperationManage
}
}
- val mode = PlanOnlyMode.fromString(flinkSession.sessionContext.getConfigMap.getOrDefault(
- OPERATION_PLAN_ONLY_MODE.key,
- operationModeDefault))
+ val mode = PlanOnlyMode.fromString(
+ sessionConfig.getOrDefault(
+ OPERATION_PLAN_ONLY_MODE.key,
+ operationModeDefault))
- flinkSession.sessionContext.set(OPERATION_PLAN_ONLY_MODE.key, mode.name)
+ val sessionContext = FlinkEngineUtils.getSessionContext(flinkSession.fSession)
+ sessionContext.set(OPERATION_PLAN_ONLY_MODE.key, mode.name)
val resultMaxRows =
flinkSession.normalizedConf.getOrElse(
ENGINE_FLINK_MAX_ROWS.key,
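Editor's note: the config-resolution pattern above is worth calling out: session-level settings now come from the gateway session's java.util.Map, so getOrDefault expresses "session override, else engine default" directly. A hedged fragment, where "none" stands in for operationModeDefault:

```scala
import java.util.{Map => JMap}

import org.apache.kyuubi.config.KyuubiConf.OPERATION_PLAN_ONLY_MODE

object SessionConfSketch {
  def planOnlyMode(sessionConfig: JMap[String, String]): String =
    sessionConfig.getOrDefault(OPERATION_PLAN_ONLY_MODE.key, "none")
}
```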
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCatalogs.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCatalogs.scala
index 11dd760e4ec..2453716812d 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCatalogs.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCatalogs.scala
@@ -17,6 +17,8 @@
package org.apache.kyuubi.engine.flink.operation
+import scala.collection.convert.ImplicitConversions._
+
import org.apache.kyuubi.engine.flink.result.ResultSetUtil
import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant.TABLE_CAT
import org.apache.kyuubi.session.Session
@@ -25,8 +27,8 @@ class GetCatalogs(session: Session) extends FlinkOperation(session) {
override protected def runInternal(): Unit = {
try {
- val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
- val catalogs = tableEnv.listCatalogs.toList
+ val catalogManager = sessionContext.getSessionState.catalogManager
+ val catalogs = catalogManager.listCatalogs.toList
resultSet = ResultSetUtil.stringListToResultSet(catalogs, TABLE_CAT)
} catch onError()
}
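Editor's note: catalog metadata now flows through the session's CatalogManager rather than a TableEnvironment. A self-contained sketch of the lookup above; listCatalogs returns a java.util.Set, hence the conversion:

```scala
import scala.collection.JavaConverters._

import org.apache.flink.table.catalog.CatalogManager

object ListCatalogsSketch {
  def catalogNames(catalogManager: CatalogManager): List[String] =
    catalogManager.listCatalogs.asScala.toList
}
```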
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetColumns.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetColumns.scala
index 6ce2a6ac7e7..b1a7c0c3ee5 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetColumns.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetColumns.scala
@@ -21,7 +21,7 @@ import scala.collection.JavaConverters._
import org.apache.commons.lang3.StringUtils
import org.apache.flink.table.api.{DataTypes, ResultKind}
-import org.apache.flink.table.catalog.Column
+import org.apache.flink.table.catalog.{Column, ObjectIdentifier}
import org.apache.flink.table.types.logical._
import org.apache.flink.types.Row
@@ -40,17 +40,17 @@ class GetColumns(
override protected def runInternal(): Unit = {
try {
- val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
val catalogName =
- if (StringUtils.isEmpty(catalogNameOrEmpty)) tableEnv.getCurrentCatalog
+ if (StringUtils.isEmpty(catalogNameOrEmpty)) executor.getCurrentCatalog
else catalogNameOrEmpty
val schemaNameRegex = toJavaRegex(schemaNamePattern)
val tableNameRegex = toJavaRegex(tableNamePattern)
val columnNameRegex = toJavaRegex(columnNamePattern).r
- val columns = tableEnv.getCatalog(catalogName).asScala.toArray.flatMap { flinkCatalog =>
+ val catalogManager = sessionContext.getSessionState.catalogManager
+ val columns = catalogManager.getCatalog(catalogName).asScala.toArray.flatMap { flinkCatalog =>
SchemaHelper.getSchemasWithPattern(flinkCatalog, schemaNameRegex)
.flatMap { schemaName =>
SchemaHelper.getFlinkTablesWithPattern(
@@ -60,7 +60,8 @@ class GetColumns(
tableNameRegex)
.filter { _._2.isDefined }
.flatMap { case (tableName, _) =>
- val flinkTable = tableEnv.from(s"`$catalogName`.`$schemaName`.`$tableName`")
+ val flinkTable = catalogManager.getTable(
+ ObjectIdentifier.of(catalogName, schemaName, tableName)).get()
val resolvedSchema = flinkTable.getResolvedSchema
resolvedSchema.getColumns.asScala.toArray.zipWithIndex
.filter { case (column, _) =>
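Editor's note: table resolution changes in the same spirit: instead of tableEnv.from(...) with a quoted path, the table is fetched through the CatalogManager by a fully-qualified ObjectIdentifier, and the Optional result makes the not-found case explicit. A sketch with caller-supplied names:

```scala
import org.apache.flink.table.catalog.{CatalogManager, ObjectIdentifier, ResolvedSchema}

object TableLookupSketch {
  def resolvedSchemaOf(
      catalogManager: CatalogManager,
      catalog: String,
      database: String,
      table: String): Option[ResolvedSchema] = {
    // getTable returns an Optional; absence is surfaced as None instead of an exception.
    val maybeTable = catalogManager.getTable(ObjectIdentifier.of(catalog, database, table))
    if (maybeTable.isPresent) Some(maybeTable.get.getResolvedSchema) else None
  }
}
```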
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala
index 3e42e9aa6ec..5f82de4a689 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentCatalog.scala
@@ -31,8 +31,7 @@ class GetCurrentCatalog(session: Session) extends FlinkOperation(session) {
override protected def runInternal(): Unit = {
try {
- val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
- val catalog = tableEnv.getCurrentCatalog
+ val catalog = executor.getCurrentCatalog
resultSet = ResultSetUtil.stringListToResultSet(List(catalog), TABLE_CAT)
} catch onError()
}
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala
index 014ca2ea379..107609c0639 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetCurrentDatabase.scala
@@ -31,8 +31,7 @@ class GetCurrentDatabase(session: Session) extends FlinkOperation(session) {
override protected def runInternal(): Unit = {
try {
- val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
- val database = tableEnv.getCurrentDatabase
+ val database = sessionContext.getSessionState.catalogManager.getCurrentDatabase
resultSet = ResultSetUtil.stringListToResultSet(List(database), TABLE_SCHEM)
} catch onError()
}
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetFunctions.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetFunctions.scala
index ab870ab7931..85f34a29a05 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetFunctions.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetFunctions.scala
@@ -20,9 +20,10 @@ package org.apache.kyuubi.engine.flink.operation
import java.sql.DatabaseMetaData
import scala.collection.JavaConverters._
+import scala.collection.convert.ImplicitConversions._
import org.apache.commons.lang3.StringUtils
-import org.apache.flink.table.api.{DataTypes, ResultKind, TableEnvironment}
+import org.apache.flink.table.api.{DataTypes, ResultKind}
import org.apache.flink.table.catalog.Column
import org.apache.flink.types.Row
@@ -42,17 +43,20 @@ class GetFunctions(
try {
val schemaPattern = toJavaRegex(schemaName)
val functionPattern = toJavaRegex(functionName)
- val tableEnv: TableEnvironment = sessionContext.getExecutionContext.getTableEnvironment
+ val functionCatalog = sessionContext.getSessionState.functionCatalog
+ val catalogManager = sessionContext.getSessionState.catalogManager
+
val systemFunctions = filterPattern(
- tableEnv.listFunctions().diff(tableEnv.listUserDefinedFunctions()),
+ functionCatalog.getFunctions
+ .diff(functionCatalog.getUserDefinedFunctions),
functionPattern)
.map { f =>
Row.of(null, null, f, null, Integer.valueOf(DatabaseMetaData.functionResultUnknown), null)
- }
- val catalogFunctions = tableEnv.listCatalogs()
+ }.toArray
+ val catalogFunctions = catalogManager.listCatalogs()
.filter { c => StringUtils.isEmpty(catalogName) || c == catalogName }
.flatMap { c =>
- val catalog = tableEnv.getCatalog(c).get()
+ val catalog = catalogManager.getCatalog(c).get()
filterPattern(catalog.listDatabases().asScala, schemaPattern)
.flatMap { d =>
filterPattern(catalog.listFunctions(d).asScala, functionPattern)
@@ -66,7 +70,7 @@ class GetFunctions(
null)
}
}
- }
+ }.toArray
resultSet = ResultSet.builder.resultKind(ResultKind.SUCCESS_WITH_CONTENT)
.columns(
Column.physical(FUNCTION_CAT, DataTypes.STRING()),
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetPrimaryKeys.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetPrimaryKeys.scala
index b534feb1fd9..5b9060cf184 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetPrimaryKeys.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetPrimaryKeys.scala
@@ -21,8 +21,9 @@ import scala.collection.JavaConverters._
import org.apache.commons.lang3.StringUtils
import org.apache.flink.table.api.{DataTypes, ResultKind}
-import org.apache.flink.table.catalog.Column
+import org.apache.flink.table.catalog.{Column, ObjectIdentifier}
import org.apache.flink.types.Row
+import org.apache.flink.util.FlinkException
import org.apache.kyuubi.engine.flink.result.ResultSet
import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._
@@ -37,22 +38,25 @@ class GetPrimaryKeys(
override protected def runInternal(): Unit = {
try {
- val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
+ val catalogManager = sessionContext.getSessionState.catalogManager
val catalogName =
- if (StringUtils.isEmpty(catalogNameOrEmpty)) tableEnv.getCurrentCatalog
+ if (StringUtils.isEmpty(catalogNameOrEmpty)) catalogManager.getCurrentCatalog
else catalogNameOrEmpty
val schemaName =
if (StringUtils.isEmpty(schemaNameOrEmpty)) {
- if (catalogName != tableEnv.getCurrentCatalog) {
- tableEnv.getCatalog(catalogName).get().getDefaultDatabase
+ if (catalogName != executor.getCurrentCatalog) {
+ catalogManager.getCatalog(catalogName).get().getDefaultDatabase
} else {
- tableEnv.getCurrentDatabase
+ catalogManager.getCurrentDatabase
}
} else schemaNameOrEmpty
- val flinkTable = tableEnv.from(s"`$catalogName`.`$schemaName`.`$tableName`")
+ val flinkTable = catalogManager
+ .getTable(ObjectIdentifier.of(catalogName, schemaName, tableName))
+ .orElseThrow(() =>
+ new FlinkException(s"Table `$catalogName`.`$schemaName`.`$tableName`` not found."))
val resolvedSchema = flinkTable.getResolvedSchema
val primaryKeySchema = resolvedSchema.getPrimaryKey
@@ -102,5 +106,4 @@ class GetPrimaryKeys(
)
// format: on
}
-
}
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetSchemas.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetSchemas.scala
index 6715b232073..f56ddd8b18e 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetSchemas.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetSchemas.scala
@@ -18,9 +18,10 @@
package org.apache.kyuubi.engine.flink.operation
import scala.collection.JavaConverters._
+import scala.collection.convert.ImplicitConversions._
import org.apache.commons.lang3.StringUtils
-import org.apache.flink.table.api.{DataTypes, ResultKind, TableEnvironment}
+import org.apache.flink.table.api.{DataTypes, ResultKind}
import org.apache.flink.table.catalog.Column
import org.apache.flink.types.Row
@@ -35,14 +36,14 @@ class GetSchemas(session: Session, catalogName: String, schema: String)
override protected def runInternal(): Unit = {
try {
val schemaPattern = toJavaRegex(schema)
- val tableEnv: TableEnvironment = sessionContext.getExecutionContext.getTableEnvironment
- val schemas = tableEnv.listCatalogs()
+ val catalogManager = sessionContext.getSessionState.catalogManager
+ val schemas = catalogManager.listCatalogs()
.filter { c => StringUtils.isEmpty(catalogName) || c == catalogName }
.flatMap { c =>
- val catalog = tableEnv.getCatalog(c).get()
+ val catalog = catalogManager.getCatalog(c).get()
filterPattern(catalog.listDatabases().asScala, schemaPattern)
.map { d => Row.of(d, c) }
- }
+ }.toArray
resultSet = ResultSet.builder.resultKind(ResultKind.SUCCESS_WITH_CONTENT)
.columns(
Column.physical(TABLE_SCHEM, DataTypes.STRING()),
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetTables.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetTables.scala
index a4e55715a5a..325a501671e 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetTables.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/GetTables.scala
@@ -37,16 +37,16 @@ class GetTables(
override protected def runInternal(): Unit = {
try {
- val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
+ val catalogManager = sessionContext.getSessionState.catalogManager
val catalogName =
- if (StringUtils.isEmpty(catalogNameOrEmpty)) tableEnv.getCurrentCatalog
+ if (StringUtils.isEmpty(catalogNameOrEmpty)) catalogManager.getCurrentCatalog
else catalogNameOrEmpty
val schemaNameRegex = toJavaRegex(schemaNamePattern)
val tableNameRegex = toJavaRegex(tableNamePattern)
- val tables = tableEnv.getCatalog(catalogName).asScala.toArray.flatMap { flinkCatalog =>
+ val tables = catalogManager.getCatalog(catalogName).asScala.toArray.flatMap { flinkCatalog =>
SchemaHelper.getSchemasWithPattern(flinkCatalog, schemaNameRegex)
.flatMap { schemaName =>
SchemaHelper.getFlinkTablesWithPattern(
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/OperationUtils.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/OperationUtils.scala
deleted file mode 100644
index 7d624948c18..00000000000
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/OperationUtils.scala
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.kyuubi.engine.flink.operation
-
-import java.util
-
-import scala.collection.JavaConverters._
-import scala.collection.mutable.ArrayBuffer
-
-import org.apache.flink.table.api.{DataTypes, ResultKind}
-import org.apache.flink.table.catalog.Column
-import org.apache.flink.table.client.gateway.Executor
-import org.apache.flink.table.operations.command._
-import org.apache.flink.types.Row
-
-import org.apache.kyuubi.engine.flink.result.{ResultSet, ResultSetUtil}
-import org.apache.kyuubi.engine.flink.result.ResultSetUtil.successResultSet
-import org.apache.kyuubi.reflection.DynMethods
-
-object OperationUtils {
-
- /**
- * Runs a SetOperation with executor. Returns when SetOperation is executed successfully.
- *
- * @param setOperation Set operation.
- * @param executor A gateway for communicating with Flink and other external systems.
- * @param sessionId Id of the session.
- * @return A ResultSet of SetOperation execution.
- */
- def runSetOperation(
- setOperation: SetOperation,
- executor: Executor,
- sessionId: String): ResultSet = {
- if (setOperation.getKey.isPresent) {
- val key: String = setOperation.getKey.get.trim
-
- if (setOperation.getValue.isPresent) {
- val newValue: String = setOperation.getValue.get.trim
- executor.setSessionProperty(sessionId, key, newValue)
- }
-
- val value = executor.getSessionConfigMap(sessionId).getOrDefault(key, "")
- ResultSet.builder
- .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
- .columns(
- Column.physical("key", DataTypes.STRING()),
- Column.physical("value", DataTypes.STRING()))
- .data(Array(Row.of(key, value)))
- .build
- } else {
- // show all properties if set without key
- val properties: util.Map[String, String] = executor.getSessionConfigMap(sessionId)
-
- val entries = ArrayBuffer.empty[Row]
- properties.forEach((key, value) => entries.append(Row.of(key, value)))
-
- if (entries.nonEmpty) {
- val prettyEntries = entries.sortBy(_.getField(0).asInstanceOf[String])
- ResultSet.builder
- .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
- .columns(
- Column.physical("key", DataTypes.STRING()),
- Column.physical("value", DataTypes.STRING()))
- .data(prettyEntries.toArray)
- .build
- } else {
- ResultSet.builder
- .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
- .columns(
- Column.physical("key", DataTypes.STRING()),
- Column.physical("value", DataTypes.STRING()))
- .data(Array[Row]())
- .build
- }
- }
- }
-
- /**
- * Runs a ResetOperation with executor. Returns when ResetOperation is executed successfully.
- *
- * @param resetOperation Reset operation.
- * @param executor A gateway for communicating with Flink and other external systems.
- * @param sessionId Id of the session.
- * @return A ResultSet of ResetOperation execution.
- */
- def runResetOperation(
- resetOperation: ResetOperation,
- executor: Executor,
- sessionId: String): ResultSet = {
- if (resetOperation.getKey.isPresent) {
- // reset the given property
- executor.resetSessionProperty(sessionId, resetOperation.getKey.get())
- } else {
- // reset all properties
- executor.resetSessionProperties(sessionId)
- }
- successResultSet
- }
-
- /**
- * Runs a AddJarOperation with the executor. Currently only jars on local filesystem
- * are supported.
- *
- * @param addJarOperation Add-jar operation.
- * @param executor A gateway for communicating with Flink and other external systems.
- * @param sessionId Id of the session.
- * @return A ResultSet of AddJarOperation execution.
- */
- def runAddJarOperation(
- addJarOperation: AddJarOperation,
- executor: Executor,
- sessionId: String): ResultSet = {
- // Removed by FLINK-27790
- val addJar = DynMethods.builder("addJar")
- .impl(executor.getClass, classOf[String], classOf[String])
- .build(executor)
- addJar.invoke[Void](sessionId, addJarOperation.getPath)
- successResultSet
- }
-
- /**
- * Runs a RemoveJarOperation with the executor. Only jars added by AddJarOperation could
- * be removed.
- *
- * @param removeJarOperation Remove-jar operation.
- * @param executor A gateway for communicating with Flink and other external systems.
- * @param sessionId Id of the session.
- * @return A ResultSet of RemoveJarOperation execution.
- */
- def runRemoveJarOperation(
- removeJarOperation: RemoveJarOperation,
- executor: Executor,
- sessionId: String): ResultSet = {
- executor.removeJar(sessionId, removeJarOperation.getPath)
- successResultSet
- }
-
- /**
- * Runs a ShowJarsOperation with the executor. Returns the jars of the current session.
- *
- * @param showJarsOperation Show-jar operation.
- * @param executor A gateway for communicating with Flink and other external systems.
- * @param sessionId Id of the session.
- * @return A ResultSet of ShowJarsOperation execution.
- */
- def runShowJarOperation(
- showJarsOperation: ShowJarsOperation,
- executor: Executor,
- sessionId: String): ResultSet = {
- // Removed by FLINK-27790
- val listJars = DynMethods.builder("listJars")
- .impl(executor.getClass, classOf[String])
- .build(executor)
- val jars = listJars.invoke[util.List[String]](sessionId)
- ResultSetUtil.stringListToResultSet(jars.asScala.toList, "jar")
- }
-}
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyStatement.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyStatement.scala
index afe04a30736..4f5d8218fd6 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyStatement.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyStatement.scala
@@ -17,10 +17,11 @@
package org.apache.kyuubi.engine.flink.operation
+import com.google.common.base.Preconditions
import org.apache.flink.table.api.TableEnvironment
+import org.apache.flink.table.gateway.api.operation.OperationHandle
import org.apache.flink.table.operations.command._
-import org.apache.kyuubi.engine.flink.FlinkEngineUtils.isFlinkVersionAtMost
import org.apache.kyuubi.engine.flink.result.ResultSetUtil
import org.apache.kyuubi.operation.{ExecutionMode, ParseMode, PhysicalMode, PlanOnlyMode, UnknownMode}
import org.apache.kyuubi.operation.PlanOnlyMode.{notSupportedModeError, unknownModeError}
@@ -46,18 +47,19 @@ class PlanOnlyStatement(
override protected def runInternal(): Unit = {
try {
- val operation = executor.parseStatement(sessionId, statement)
+ val operations = executor.getTableEnvironment.getParser.parse(statement)
+ Preconditions.checkArgument(
+ operations.size() == 1,
+ "Plan-only mode supports a single statement only")
+ val operation = operations.get(0)
operation match {
- case setOperation: SetOperation =>
- resultSet = OperationUtils.runSetOperation(setOperation, executor, sessionId)
- case resetOperation: ResetOperation =>
- resultSet = OperationUtils.runResetOperation(resetOperation, executor, sessionId)
- case addJarOperation: AddJarOperation if isFlinkVersionAtMost("1.15") =>
- resultSet = OperationUtils.runAddJarOperation(addJarOperation, executor, sessionId)
- case removeJarOperation: RemoveJarOperation =>
- resultSet = OperationUtils.runRemoveJarOperation(removeJarOperation, executor, sessionId)
- case showJarsOperation: ShowJarsOperation if isFlinkVersionAtMost("1.15") =>
- resultSet = OperationUtils.runShowJarOperation(showJarsOperation, executor, sessionId)
+ case _: SetOperation | _: ResetOperation | _: AddJarOperation | _: RemoveJarOperation |
+ _: ShowJarsOperation =>
+ val resultFetcher = executor.executeStatement(
+ new OperationHandle(getHandle.identifier),
+ statement)
+ resultSet = ResultSetUtil.fromResultFetcher(resultFetcher)
case _ => explainOperation(statement)
}
} catch {
@@ -66,7 +68,7 @@ class PlanOnlyStatement(
}
private def explainOperation(statement: String): Unit = {
- val tableEnv: TableEnvironment = sessionContext.getExecutionContext.getTableEnvironment
+ val tableEnv: TableEnvironment = executor.getTableEnvironment
val explainPlans =
tableEnv.explainSql(statement).split(s"$lineSeparator$lineSeparator")
val operationPlan = mode match {
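Editor's note: plan-only mode now leans on the gateway executor's TableEnvironment for parsing. A condensed sketch of the guard above; parse returns a java.util.List of operations, and exactly one is required:

```scala
import org.apache.flink.table.gateway.service.operation.OperationExecutor
import org.apache.flink.table.operations.Operation

object PlanOnlySketch {
  def parseSingle(executor: OperationExecutor, statement: String): Operation = {
    val operations = executor.getTableEnvironment.getParser.parse(statement)
    require(operations.size == 1, "Plan-only mode supports a single statement only")
    operations.get(0)
  }
}
```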
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala
index 60214b2cd0f..f279ccda616 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentCatalog.scala
@@ -30,8 +30,8 @@ class SetCurrentCatalog(session: Session, catalog: String)
override protected def runInternal(): Unit = {
try {
- val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
- tableEnv.useCatalog(catalog)
+ val catalogManager = sessionContext.getSessionState.catalogManager
+ catalogManager.setCurrentCatalog(catalog)
setHasResultSet(false)
} catch onError()
}
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala
index 7610ab2f18c..70535e8344f 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/operation/SetCurrentDatabase.scala
@@ -30,8 +30,8 @@ class SetCurrentDatabase(session: Session, database: String)
override protected def runInternal(): Unit = {
try {
- val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
- tableEnv.useDatabase(database)
+ val catalogManager = sessionContext.getSessionState.catalogManager
+ catalogManager.setCurrentDatabase(database)
setHasResultSet(false)
} catch onError()
}
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSet.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSet.scala
index 09c40198856..b90be09ff86 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSet.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSet.scala
@@ -23,7 +23,7 @@ import scala.collection.JavaConverters._
import com.google.common.collect.Iterators
import org.apache.flink.api.common.JobID
-import org.apache.flink.table.api.{DataTypes, ResultKind, TableResult}
+import org.apache.flink.table.api.{DataTypes, ResultKind}
import org.apache.flink.table.catalog.Column
import org.apache.flink.types.Row
@@ -58,17 +58,6 @@ case class ResultSet(
*/
object ResultSet {
- def fromTableResult(tableResult: TableResult): ResultSet = {
- val schema = tableResult.getResolvedSchema
- // collect all rows from table result as list
- // this is ok as TableResult contains limited rows
- val rows = tableResult.collect.asScala.toArray
- builder.resultKind(tableResult.getResultKind)
- .columns(schema.getColumns)
- .data(rows)
- .build
- }
-
def fromJobId(jobID: JobID): ResultSet = {
val data: Array[Row] = if (jobID != null) {
Array(Row.of(jobID.toString))
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSetUtil.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSetUtil.scala
index ded271cf1d7..d6bc2badaf8 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSetUtil.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/result/ResultSetUtil.scala
@@ -17,14 +17,25 @@
package org.apache.kyuubi.engine.flink.result;
+import scala.collection.convert.ImplicitConversions._
+import scala.collection.mutable.ListBuffer
+
import org.apache.flink.table.api.DataTypes
import org.apache.flink.table.api.ResultKind
import org.apache.flink.table.catalog.Column
+import org.apache.flink.table.data.RowData
+import org.apache.flink.table.data.conversion.DataStructureConverters
+import org.apache.flink.table.gateway.service.result.ResultFetcher
+import org.apache.flink.table.types.DataType
import org.apache.flink.types.Row
+import org.apache.kyuubi.engine.flink.shim.FlinkResultSet
+
/** Utility object for building ResultSet. */
object ResultSetUtil {
+ private val FETCH_ROWS_PER_SECOND = 1000
+
/**
* Build a ResultSet with a column name and a list of String values.
*
@@ -54,4 +65,64 @@ object ResultSetUtil {
.columns(Column.physical("result", DataTypes.STRING))
.data(Array[Row](Row.of("OK")))
.build
+
+ def fromResultFetcher(resultFetcher: ResultFetcher, maxRows: Int): ResultSet = {
+ val schema = resultFetcher.getResultSchema
+ val resultRowData = ListBuffer.newBuilder[RowData]
+ var fetched: FlinkResultSet = null
+ var token: Long = 0
+ var rowNum: Int = 0
+ do {
+ fetched = new FlinkResultSet(resultFetcher.fetchResults(token, FETCH_ROWS_PER_SECOND))
+ val data = fetched.getData
+ val slice = data.slice(0, maxRows - rowNum)
+ resultRowData ++= slice
+ rowNum += slice.size
+ token = fetched.getNextToken
+ try Thread.sleep(1000L)
+ catch {
+ case _: InterruptedException => fetched.getNextToken == null
+ }
+ } while (
+ fetched.getNextToken != null &&
+ rowNum < maxRows &&
+ fetched.getResultType != org.apache.flink.table.gateway.api.results.ResultSet.ResultType.EOS
+ )
+ val dataTypes = resultFetcher.getResultSchema.getColumnDataTypes
+ ResultSet.builder
+ .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
+ .columns(schema.getColumns)
+ .data(resultRowData.result().map(rd => convertToRow(rd, dataTypes.toList)).toArray)
+ .build
+ }
+
+ def fromResultFetcher(resultFetcher: ResultFetcher): ResultSet = {
+ val schema = resultFetcher.getResultSchema
+ val resultRowData = ListBuffer.newBuilder[RowData]
+ var fetched: FlinkResultSet = null
+ var token: Long = 0
+ do {
+ fetched = new FlinkResultSet(resultFetcher.fetchResults(token, FETCH_ROWS_PER_SECOND))
+ resultRowData ++= fetched.getData
+ token = fetched.getNextToken
+ try Thread.sleep(1000L)
+ catch {
+ case _: InterruptedException =>
+ }
+ } while (
+ fetched.getNextToken != null &&
+ fetched.getResultType != org.apache.flink.table.gateway.api.results.ResultSet.ResultType.EOS
+ )
+ val dataTypes = resultFetcher.getResultSchema.getColumnDataTypes
+ ResultSet.builder
+ .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
+ .columns(schema.getColumns)
+ .data(resultRowData.result().map(rd => convertToRow(rd, dataTypes.toList)).toArray)
+ .build
+ }
+
+ private[this] def convertToRow(r: RowData, dataTypes: List[DataType]): Row = {
+ val converter = DataStructureConverters.getConverter(DataTypes.ROW(dataTypes: _*))
+ converter.toExternal(r).asInstanceOf[Row]
+ }
}
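Editor's note: both overloads above implement the same polling contract: fetch with token 0, follow getNextToken, stop on EOS (or the row cap), and sleep one second between rounds so each round stays near FETCH_ROWS_PER_SECOND rows. Calling it is a one-liner; a hedged usage sketch, assuming a ResultFetcher obtained from OperationExecutor.executeStatement:

```scala
import org.apache.flink.table.gateway.service.result.ResultFetcher

import org.apache.kyuubi.engine.flink.result.{ResultSet, ResultSetUtil}

object FetchSketch {
  // Bounded fetch: stop after maxRows even if the stream has not reached EOS.
  def collect(resultFetcher: ResultFetcher, maxRows: Int = 1000): ResultSet =
    ResultSetUtil.fromResultFetcher(resultFetcher, maxRows)
}
```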
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSQLSessionManager.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSQLSessionManager.scala
index 07971e39fae..71caaa67a67 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSQLSessionManager.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSQLSessionManager.scala
@@ -17,12 +17,17 @@
package org.apache.kyuubi.engine.flink.session
-import org.apache.flink.table.client.gateway.context.DefaultContext
-import org.apache.flink.table.client.gateway.local.LocalExecutor
+import scala.collection.JavaConverters._
+
+import org.apache.flink.table.gateway.api.session.SessionEnvironment
+import org.apache.flink.table.gateway.rest.util.SqlGatewayRestAPIVersion
+import org.apache.flink.table.gateway.service.context.DefaultContext
import org.apache.hive.service.rpc.thrift.TProtocolVersion
import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_HANDLE_KEY
import org.apache.kyuubi.engine.flink.operation.FlinkSQLOperationManager
+import org.apache.kyuubi.engine.flink.shim.FlinkSessionManager
import org.apache.kyuubi.session.{Session, SessionHandle, SessionManager}
class FlinkSQLSessionManager(engineContext: DefaultContext)
@@ -31,11 +36,11 @@ class FlinkSQLSessionManager(engineContext: DefaultContext)
override protected def isServer: Boolean = false
val operationManager = new FlinkSQLOperationManager()
- val executor = new LocalExecutor(engineContext)
+ val sessionManager = new FlinkSessionManager(engineContext)
override def start(): Unit = {
super.start()
- executor.start()
+ sessionManager.start()
}
override protected def createSession(
@@ -46,19 +51,33 @@ class FlinkSQLSessionManager(engineContext: DefaultContext)
conf: Map[String, String]): Session = {
conf.get(KYUUBI_SESSION_HANDLE_KEY).map(SessionHandle.fromUUID).flatMap(
getSessionOption).getOrElse {
- new FlinkSessionImpl(
+ val flinkInternalSession = sessionManager.openSession(
+ SessionEnvironment.newBuilder
+ .setSessionEndpointVersion(SqlGatewayRestAPIVersion.V1)
+ .addSessionConfig(mapAsJavaMap(conf))
+ .build)
+ val sessionConfig = flinkInternalSession.getSessionConfig
+ sessionConfig.putAll(conf.asJava)
+ val session = new FlinkSessionImpl(
protocol,
user,
password,
ipAddress,
conf,
this,
- executor)
+ flinkInternalSession)
+ session
}
}
override def closeSession(sessionHandle: SessionHandle): Unit = {
super.closeSession(sessionHandle)
- executor.closeSession(sessionHandle.toString)
+ sessionManager.closeSession(
+ new org.apache.flink.table.gateway.api.session.SessionHandle(sessionHandle.identifier))
+ }
+
+ override def stop(): Unit = synchronized {
+ sessionManager.stop()
+ super.stop()
}
}
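Editor's note: opening a gateway-backed session boils down to building a SessionEnvironment. A sketch of what createSession above assembles, with a caller-supplied config map; V1 is the endpoint version this patch targets:

```scala
import scala.collection.JavaConverters._

import org.apache.flink.table.gateway.api.session.SessionEnvironment
import org.apache.flink.table.gateway.rest.util.SqlGatewayRestAPIVersion

object OpenSessionSketch {
  def environmentFor(conf: Map[String, String]): SessionEnvironment =
    SessionEnvironment.newBuilder
      .setSessionEndpointVersion(SqlGatewayRestAPIVersion.V1)
      .addSessionConfig(conf.asJava)
      .build
}
```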
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala
index a4b6a8a902b..09f5ac94319 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala
@@ -19,10 +19,12 @@ package org.apache.kyuubi.engine.flink.session
import scala.util.control.NonFatal
+import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.util.EnvironmentInformation
import org.apache.flink.table.client.gateway.SqlExecutionException
-import org.apache.flink.table.client.gateway.context.SessionContext
-import org.apache.flink.table.client.gateway.local.LocalExecutor
+import org.apache.flink.table.gateway.api.operation.OperationHandle
+import org.apache.flink.table.gateway.service.context.SessionContext
+import org.apache.flink.table.gateway.service.session.{Session => FSession}
import org.apache.hive.service.rpc.thrift.{TGetInfoType, TGetInfoValue, TProtocolVersion}
import org.apache.kyuubi.KyuubiSQLException
@@ -37,14 +39,15 @@ class FlinkSessionImpl(
ipAddress: String,
conf: Map[String, String],
sessionManager: SessionManager,
- val executor: LocalExecutor)
+ val fSession: FSession)
extends AbstractSession(protocol, user, password, ipAddress, conf, sessionManager) {
override val handle: SessionHandle =
- conf.get(KYUUBI_SESSION_HANDLE_KEY).map(SessionHandle.fromUUID).getOrElse(SessionHandle())
+ conf.get(KYUUBI_SESSION_HANDLE_KEY).map(SessionHandle.fromUUID)
+ .getOrElse(SessionHandle.fromUUID(fSession.getSessionHandle.getIdentifier.toString))
lazy val sessionContext: SessionContext = {
- FlinkEngineUtils.getSessionContext(executor, handle.identifier.toString)
+ FlinkEngineUtils.getSessionContext(fSession)
}
private def setModifiableConfig(key: String, value: String): Unit = {
@@ -56,16 +59,15 @@ class FlinkSessionImpl(
}
override def open(): Unit = {
- executor.openSession(handle.identifier.toString)
+ val executor = fSession.createExecutor(Configuration.fromMap(fSession.getSessionConfig))
val (useCatalogAndDatabaseConf, otherConf) = normalizedConf.partition { case (k, _) =>
Array("use:catalog", "use:database").contains(k)
}
useCatalogAndDatabaseConf.get("use:catalog").foreach { catalog =>
- val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
try {
- tableEnv.useCatalog(catalog)
+ executor.executeStatement(OperationHandle.create, s"USE CATALOG $catalog")
} catch {
case NonFatal(e) =>
throw e
@@ -73,9 +75,8 @@ class FlinkSessionImpl(
}
useCatalogAndDatabaseConf.get("use:database").foreach { database =>
- val tableEnv = sessionContext.getExecutionContext.getTableEnvironment
try {
- tableEnv.useDatabase(database)
+ executor.executeStatement(OperationHandle.create, s"USE $database")
} catch {
case NonFatal(e) =>
if (database != "default") {
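Editor's note: switching catalog and database at session open is now done with plain SQL through the executor, one fresh OperationHandle per statement, instead of TableEnvironment setters. A sketch with hypothetical identifiers:

```scala
import org.apache.flink.table.gateway.api.operation.OperationHandle
import org.apache.flink.table.gateway.service.operation.OperationExecutor

object UseCatalogSketch {
  def switchTo(executor: OperationExecutor, catalog: String, database: String): Unit = {
    // Backticks quote the identifiers, matching Flink SQL syntax.
    executor.executeStatement(OperationHandle.create, s"USE CATALOG `$catalog`")
    executor.executeStatement(OperationHandle.create, s"USE `$database`")
  }
}
```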
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkResultSet.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkResultSet.scala
new file mode 100644
index 00000000000..e3dd0f08124
--- /dev/null
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkResultSet.scala
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.engine.flink.shim
+
+import java.lang.{Long => JLong}
+import java.util
+
+import org.apache.flink.table.data.RowData
+import org.apache.flink.table.gateway.api.results.ResultSet.ResultType
+
+import org.apache.kyuubi.engine.flink.FlinkEngineUtils
+import org.apache.kyuubi.reflection.DynMethods
+
+class FlinkResultSet(resultSet: AnyRef) {
+
+ def getData: util.List[RowData] = {
+ if (FlinkEngineUtils.isFlinkVersionEqualTo("1.16")) {
+ DynMethods.builder("getData")
+ .impl("org.apache.flink.table.gateway.api.results.ResultSet")
+ .build()
+ .invoke(resultSet)
+ .asInstanceOf[util.List[RowData]]
+ } else {
+ DynMethods.builder("getData")
+ .impl("org.apache.flink.table.gateway.api.results.ResultSetImpl")
+ .build()
+ .invoke(resultSet)
+ .asInstanceOf[util.List[RowData]]
+ }
+ }
+
+ def getNextToken: JLong = {
+ if (FlinkEngineUtils.isFlinkVersionEqualTo("1.16")) {
+ DynMethods.builder("getNextToken")
+ .impl("org.apache.flink.table.gateway.api.results.ResultSet")
+ .build()
+ .invoke(resultSet)
+ .asInstanceOf[JLong]
+ } else {
+ DynMethods.builder("getNextToken")
+ .impl("org.apache.flink.table.gateway.api.results.ResultSetImpl")
+ .build()
+ .invoke(resultSet)
+ .asInstanceOf[JLong]
+ }
+ }
+
+ def getResultType: ResultType = {
+ if (FlinkEngineUtils.isFlinkVersionEqualTo("1.16")) {
+ DynMethods.builder("getResultType")
+ .impl("org.apache.flink.table.gateway.api.results.ResultSet")
+ .build()
+ .invoke(resultSet)
+ .asInstanceOf[ResultType]
+ } else {
+ DynMethods.builder("getResultType")
+ .impl("org.apache.flink.table.gateway.api.results.ResultSetImpl")
+ .build()
+ .invoke(resultSet)
+ .asInstanceOf[ResultType]
+ }
+ }
+}
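Editor's note: the three getters above share one shape: pick the target class by Flink version, bind the method reflectively, invoke, cast. A condensed sketch of that pattern; invokeGetter is a hypothetical helper that could replace the triplicated branches:

```scala
import org.apache.kyuubi.engine.flink.FlinkEngineUtils
import org.apache.kyuubi.reflection.DynMethods

object ResultSetShimSketch {
  // Flink 1.16 exposes the accessors on the ResultSet interface; newer versions
  // route through ResultSetImpl, per the shim above.
  private def targetClass: String =
    if (FlinkEngineUtils.isFlinkVersionEqualTo("1.16")) {
      "org.apache.flink.table.gateway.api.results.ResultSet"
    } else {
      "org.apache.flink.table.gateway.api.results.ResultSetImpl"
    }

  // Binds a no-arg getter on the version-appropriate class and invokes it.
  def invokeGetter[T](resultSet: AnyRef, method: String): T =
    DynMethods.builder(method).impl(targetClass).build().invoke(resultSet).asInstanceOf[T]
}
```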
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkSessionManager.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkSessionManager.scala
new file mode 100644
index 00000000000..e34819dd656
--- /dev/null
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/shim/FlinkSessionManager.scala
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.engine.flink.shim
+
+import org.apache.flink.table.gateway.api.session.{SessionEnvironment, SessionHandle}
+import org.apache.flink.table.gateway.service.context.DefaultContext
+import org.apache.flink.table.gateway.service.session.Session
+
+import org.apache.kyuubi.engine.flink.FlinkEngineUtils
+import org.apache.kyuubi.reflection.{DynConstructors, DynMethods}
+
+class FlinkSessionManager(engineContext: DefaultContext) {
+
+ val sessionManager: AnyRef = {
+ if (FlinkEngineUtils.isFlinkVersionEqualTo("1.16")) {
+ DynConstructors.builder().impl(
+ "org.apache.flink.table.gateway.service.session.SessionManager",
+ classOf[DefaultContext])
+ .build()
+ .newInstance(engineContext)
+ } else {
+ DynConstructors.builder().impl(
+ "org.apache.flink.table.gateway.service.session.SessionManagerImpl",
+ classOf[DefaultContext])
+ .build()
+ .newInstance(engineContext)
+ }
+ }
+
+ def start(): Unit = {
+ if (FlinkEngineUtils.isFlinkVersionEqualTo("1.16")) {
+ DynMethods.builder("start")
+ .impl("org.apache.flink.table.gateway.service.session.SessionManager")
+ .build()
+ .invoke(sessionManager)
+ } else {
+ DynMethods.builder("start")
+ .impl("org.apache.flink.table.gateway.service.session.SessionManagerImpl")
+ .build()
+ .invoke(sessionManager)
+ }
+ }
+
+ def stop(): Unit = {
+ if (FlinkEngineUtils.isFlinkVersionEqualTo("1.16")) {
+ DynMethods.builder("stop")
+ .impl("org.apache.flink.table.gateway.service.session.SessionManager")
+ .build()
+ .invoke(sessionManager)
+ } else {
+ DynMethods.builder("stop")
+ .impl("org.apache.flink.table.gateway.service.session.SessionManagerImpl")
+ .build()
+ .invoke(sessionManager)
+ }
+ }
+
+ def getSession(sessionHandle: SessionHandle): Session = {
+ if (FlinkEngineUtils.isFlinkVersionEqualTo("1.16")) {
+ DynMethods.builder("getSession")
+ .impl(
+ "org.apache.flink.table.gateway.service.session.SessionManager",
+ classOf[SessionHandle])
+ .build()
+ .invoke(sessionManager, sessionHandle)
+ .asInstanceOf[Session]
+ } else {
+ DynMethods.builder("getSession")
+ .impl(
+ "org.apache.flink.table.gateway.service.session.SessionManagerImpl",
+ classOf[SessionHandle])
+ .build()
+ .invoke(sessionManager, sessionHandle)
+ .asInstanceOf[Session]
+ }
+ }
+
+ def openSession(environment: SessionEnvironment): Session = {
+ if (FlinkEngineUtils.isFlinkVersionEqualTo("1.16")) {
+ DynMethods.builder("openSession")
+ .impl(
+ "org.apache.flink.table.gateway.service.session.SessionManager",
+ classOf[SessionEnvironment])
+ .build()
+ .invoke(sessionManager, environment)
+ .asInstanceOf[Session]
+ } else {
+ DynMethods.builder("openSession")
+ .impl(
+ "org.apache.flink.table.gateway.service.session.SessionManagerImpl",
+ classOf[SessionEnvironment])
+ .build()
+ .invoke(sessionManager, environment)
+ .asInstanceOf[Session]
+ }
+ }
+
+ def closeSession(sessionHandle: SessionHandle): Unit = {
+ if (FlinkEngineUtils.isFlinkVersionEqualTo("1.16")) {
+ DynMethods.builder("closeSession")
+ .impl(
+ "org.apache.flink.table.gateway.service.session.SessionManager",
+ classOf[SessionHandle])
+ .build()
+ .invoke(sessionManager, sessionHandle)
+ } else {
+ DynMethods.builder("closeSession")
+ .impl(
+ "org.apache.flink.table.gateway.service.session.SessionManagerImpl",
+ classOf[SessionHandle])
+ .build()
+ .invoke(sessionManager, sessionHandle)
+ }
+ }
+}
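Editor's note: a usage sketch for the shim above, assuming an engine-level DefaultContext and a SessionEnvironment built as shown earlier; the caller never sees whether the underlying class is SessionManager (Flink 1.16) or SessionManagerImpl (later versions):

```scala
import org.apache.flink.table.gateway.api.session.SessionEnvironment
import org.apache.flink.table.gateway.service.context.DefaultContext

import org.apache.kyuubi.engine.flink.shim.FlinkSessionManager

object SessionManagerSketch {
  def roundTrip(engineContext: DefaultContext, environment: SessionEnvironment): Unit = {
    val manager = new FlinkSessionManager(engineContext)
    manager.start()
    val session = manager.openSession(environment)
    // ... run operations against the session ...
    manager.closeSession(session.getSessionHandle)
    manager.stop()
  }
}
```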
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithDiscoveryFlinkSQLEngine.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithDiscoveryFlinkSQLEngine.scala
index aebcce6c589..c352429eadc 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithDiscoveryFlinkSQLEngine.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithDiscoveryFlinkSQLEngine.scala
@@ -17,30 +17,14 @@
package org.apache.kyuubi.engine.flink
-import java.util.UUID
-
-import org.apache.kyuubi.config.KyuubiConf.{ENGINE_SHARE_LEVEL, ENGINE_TYPE}
-import org.apache.kyuubi.engine.ShareLevel
-import org.apache.kyuubi.ha.HighAvailabilityConf.{HA_ENGINE_REF_ID, HA_NAMESPACE}
+import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.ha.client.{DiscoveryClient, DiscoveryClientProvider}
-trait WithDiscoveryFlinkSQLEngine extends WithFlinkSQLEngineOnYarn {
-
- override protected def engineRefId: String = UUID.randomUUID().toString
-
- def namespace: String = "/kyuubi/flink-yarn-application-test"
+trait WithDiscoveryFlinkSQLEngine {
- def shareLevel: String = ShareLevel.USER.toString
+ protected def namespace: String
- def engineType: String = "flink"
-
- override def withKyuubiConf: Map[String, String] = {
- Map(
- HA_NAMESPACE.key -> namespace,
- HA_ENGINE_REF_ID.key -> engineRefId,
- ENGINE_TYPE.key -> "FLINK_SQL",
- ENGINE_SHARE_LEVEL.key -> shareLevel)
- }
+ protected def conf: KyuubiConf
def withDiscoveryClient(f: DiscoveryClient => Unit): Unit = {
DiscoveryClientProvider.withDiscoveryClient(conf)(f)
@@ -49,7 +33,7 @@ trait WithDiscoveryFlinkSQLEngine extends WithFlinkSQLEngineOnYarn {
def getFlinkEngineServiceUrl: String = {
var hostPort: Option[(String, Int)] = None
var retries = 0
- while (hostPort.isEmpty && retries < 5) {
+ while (hostPort.isEmpty && retries < 10) {
withDiscoveryClient(client => hostPort = client.getServerHost(namespace))
retries += 1
Thread.sleep(1000L)
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala
index c8435f9c54c..0001f31aebb 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala
@@ -17,58 +17,167 @@
package org.apache.kyuubi.engine.flink
+import java.io.{File, FilenameFilter}
+import java.lang.ProcessBuilder.Redirect
+import java.net.URI
+import java.nio.file.{Files, Paths}
+
import scala.collection.JavaConverters._
+import scala.collection.mutable.ArrayBuffer
-import org.apache.flink.client.cli.{CustomCommandLine, DefaultCLI}
import org.apache.flink.configuration.{Configuration, RestOptions}
import org.apache.flink.runtime.minicluster.{MiniCluster, MiniClusterConfiguration}
-import org.apache.flink.table.client.gateway.context.DefaultContext
-import org.apache.kyuubi.KyuubiFunSuite
+import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiException, KyuubiFunSuite, SCALA_COMPILE_VERSION, Utils}
import org.apache.kyuubi.config.KyuubiConf
+import org.apache.kyuubi.config.KyuubiConf._
+import org.apache.kyuubi.ha.HighAvailabilityConf.HA_ADDRESSES
+import org.apache.kyuubi.zookeeper.EmbeddedZookeeper
+import org.apache.kyuubi.zookeeper.ZookeeperConf.{ZK_CLIENT_PORT, ZK_CLIENT_PORT_ADDRESS}
trait WithFlinkSQLEngineLocal extends KyuubiFunSuite with WithFlinkTestResources {
protected val flinkConfig = new Configuration()
+
protected var miniCluster: MiniCluster = _
- protected var engine: FlinkSQLEngine = _
- // conf will be loaded until start flink engine
+
+ protected var engineProcess: Process = _
+
+ private var zkServer: EmbeddedZookeeper = _
+
+ protected val conf: KyuubiConf = FlinkSQLEngine.kyuubiConf
+
+ protected def engineRefId: String
+
def withKyuubiConf: Map[String, String]
- protected val kyuubiConf: KyuubiConf = FlinkSQLEngine.kyuubiConf
protected var connectionUrl: String = _
override def beforeAll(): Unit = {
+ withKyuubiConf.foreach { case (k, v) =>
+ if (k.startsWith("flink.")) {
+ flinkConfig.setString(k.stripPrefix("flink."), v)
+ }
+ }
+ withKyuubiConf.foreach { case (k, v) =>
+ System.setProperty(k, v)
+ conf.set(k, v)
+ }
+
+ zkServer = new EmbeddedZookeeper()
+ conf.set(ZK_CLIENT_PORT, 0).set(ZK_CLIENT_PORT_ADDRESS, "localhost")
+ zkServer.initialize(conf)
+ zkServer.start()
+ conf.set(HA_ADDRESSES, zkServer.getConnectString)
+
+ val envs = scala.collection.mutable.Map[String, String]()
+ val kyuubiExternals = Utils.getCodeSourceLocation(getClass)
+ .split("externals").head
+ val flinkHome = {
+ val candidates = Paths.get(kyuubiExternals, "externals", "kyuubi-download", "target")
+ .toFile.listFiles(f => f.getName.contains("flink"))
+ if (candidates == null) None else candidates.map(_.toPath).headOption
+ }
+ if (flinkHome.isDefined) {
+ envs("FLINK_HOME") = flinkHome.get.toString
+ envs("FLINK_CONF_DIR") = Paths.get(flinkHome.get.toString, "conf").toString
+ }
+ envs("JAVA_HOME") = System.getProperty("java.home")
+ envs("JAVA_EXEC") = Paths.get(envs("JAVA_HOME"), "bin", "java").toString
+
startMiniCluster()
- startFlinkEngine()
+ startFlinkEngine(envs.toMap)
super.beforeAll()
}
override def afterAll(): Unit = {
super.afterAll()
- stopFlinkEngine()
- miniCluster.close()
+ if (engineProcess != null) {
+ engineProcess.destroy()
+ engineProcess = null
+ }
+ if (miniCluster != null) {
+ miniCluster.close()
+ miniCluster = null
+ }
+ if (zkServer != null) {
+ zkServer.stop()
+ zkServer = null
+ }
}
- def startFlinkEngine(): Unit = {
- withKyuubiConf.foreach { case (k, v) =>
- System.setProperty(k, v)
- kyuubiConf.set(k, v)
+ def startFlinkEngine(envs: Map[String, String]): Unit = {
+ val flinkHome = envs("FLINK_HOME")
+ val processBuilder: ProcessBuilder = new ProcessBuilder
+ processBuilder.environment().putAll(envs.asJava)
+
+ conf.set(ENGINE_FLINK_EXTRA_CLASSPATH, udfJar.getAbsolutePath)
+ val command = new ArrayBuffer[String]()
+
+ command += envs("JAVA_EXEC")
+
+ val memory = conf.get(ENGINE_FLINK_MEMORY)
+ command += s"-Xmx$memory"
+ val javaOptions = conf.get(ENGINE_FLINK_JAVA_OPTIONS)
+ if (javaOptions.isDefined) {
+ command += javaOptions.get
}
- val engineContext = new DefaultContext(
- List(udfJar.toURI.toURL).asJava,
- flinkConfig,
- List[CustomCommandLine](new DefaultCLI).asJava)
- FlinkSQLEngine.startEngine(engineContext)
- engine = FlinkSQLEngine.currentEngine.get
- connectionUrl = engine.frontendServices.head.connectionUrl
- }
- def stopFlinkEngine(): Unit = {
- if (engine != null) {
- engine.stop()
- engine = null
+ command += "-cp"
+ val classpathEntries = new java.util.LinkedHashSet[String]
+ // flink engine runtime jar
+ mainResource(envs).foreach(classpathEntries.add)
+ // flink sql jars
+ Paths.get(flinkHome)
+ .resolve("opt")
+ .toFile
+ .listFiles(new FilenameFilter {
+ override def accept(dir: File, name: String): Boolean = {
+ name.toLowerCase.startsWith("flink-sql-client") ||
+ name.toLowerCase.startsWith("flink-sql-gateway")
+ }
+ }).foreach(jar => classpathEntries.add(jar.getAbsolutePath))
+
+ // jars from flink lib
+ classpathEntries.add(s"$flinkHome${File.separator}lib${File.separator}*")
+
+    // classpath contains flink configurations, defaulting to $FLINK_HOME/conf
+ classpathEntries.add(envs.getOrElse("FLINK_CONF_DIR", ""))
+ // classpath contains hadoop configurations
+ val cp = System.getProperty("java.class.path")
+ // exclude kyuubi flink engine jar that has SPI for EmbeddedExecutorFactory
+ // which can't be initialized on the client side
+ val hadoopJars = cp.split(":").filter(s => !s.contains("flink"))
+ hadoopJars.foreach(classpathEntries.add)
+ val extraCp = conf.get(ENGINE_FLINK_EXTRA_CLASSPATH)
+ extraCp.foreach(classpathEntries.add)
+ if (hadoopJars.isEmpty && extraCp.isEmpty) {
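+      // dev fallback: use the Hadoop client jars staged under target/scala-<version>/jars by the build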
+ mainResource(envs).foreach { path =>
+ val devHadoopJars = Paths.get(path).getParent
+ .resolve(s"scala-$SCALA_COMPILE_VERSION")
+ .resolve("jars")
+ if (!Files.exists(devHadoopJars)) {
+ throw new KyuubiException(s"The path $devHadoopJars does not exists. " +
+ s"Please set FLINK_HADOOP_CLASSPATH or ${ENGINE_FLINK_EXTRA_CLASSPATH.key}" +
+ s" for configuring location of hadoop client jars, etc.")
+ }
+ classpathEntries.add(s"$devHadoopJars${File.separator}*")
+ }
+ }
+ command += classpathEntries.asScala.mkString(File.pathSeparator)
+ command += "org.apache.kyuubi.engine.flink.FlinkSQLEngine"
+
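+    // forward all Kyuubi configurations to the engine process as --conf key=value pairs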
+ conf.getAll.foreach { case (k, v) =>
+ command += "--conf"
+ command += s"$k=$v"
}
+
+ processBuilder.command(command.toList.asJava)
+ processBuilder.redirectOutput(Redirect.INHERIT)
+ processBuilder.redirectError(Redirect.INHERIT)
+
+ info(s"staring flink local engine...")
+ engineProcess = processBuilder.start()
}
private def startMiniCluster(): Unit = {
@@ -84,4 +193,35 @@ trait WithFlinkSQLEngineLocal extends KyuubiFunSuite with WithFlinkTestResources
protected def getJdbcUrl: String = s"jdbc:hive2://$connectionUrl/;"
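+  // resolve the Flink engine jar: user-specified conf first, then the KYUUBI_HOME layout, then the dev build output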
+ def mainResource(env: Map[String, String]): Option[String] = {
+ val module = "kyuubi-flink-sql-engine"
+ val shortName = "flink"
+ // 1. get the main resource jar for user specified config first
+ val jarName = s"${module}_$SCALA_COMPILE_VERSION-$KYUUBI_VERSION.jar"
+ conf.getOption(s"kyuubi.session.engine.$shortName.main.resource").filter {
+ userSpecified =>
+        // skip the existence check if it is not a local file.
+ val uri = new URI(userSpecified)
+        val scheme = if (uri.getScheme != null) uri.getScheme else "file"
+        scheme match {
+ case "file" => Files.exists(Paths.get(userSpecified))
+ case _ => true
+ }
+ }.orElse {
+ // 2. get the main resource jar from system build default
+ env.get(KYUUBI_HOME).toSeq
+ .flatMap { p =>
+ Seq(
+ Paths.get(p, "externals", "engines", shortName, jarName),
+ Paths.get(p, "externals", module, "target", jarName))
+ }
+ .find(Files.exists(_)).map(_.toAbsolutePath.toFile.getCanonicalPath)
+ }.orElse {
+ // 3. get the main resource from dev environment
+ val cwd = Utils.getCodeSourceLocation(getClass).split("externals")
+ assert(cwd.length > 1)
+ Option(Paths.get(cwd.head, "externals", module, "target", jarName))
+ .map(_.toAbsolutePath.toFile.getCanonicalPath)
+ }
+ }
}
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineOnYarn.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineOnYarn.scala
index 3847087b3fc..553574e65b4 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineOnYarn.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineOnYarn.scala
@@ -49,7 +49,7 @@ trait WithFlinkSQLEngineOnYarn extends KyuubiFunSuite with WithFlinkTestResource
private var zkServer: EmbeddedZookeeper = _
- def withKyuubiConf: Map[String, String]
+ def withKyuubiConf: Map[String, String] = testExtraConf
private val yarnConf: YarnConfiguration = {
val yarnConfig = new YarnConfiguration()
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala
index 6a85654f0d7..3ea02774eb3 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala
@@ -17,6 +17,8 @@
package org.apache.kyuubi.engine.flink
+import java.io.File
+
import org.apache.kyuubi.Utils
import org.apache.kyuubi.engine.flink.util.TestUserClassLoaderJar
@@ -33,9 +35,12 @@ trait WithFlinkTestResources {
}
"""
- protected val udfJar = TestUserClassLoaderJar.createJarFile(
+ protected val udfJar: File = TestUserClassLoaderJar.createJarFile(
Utils.createTempDir("test-jar").toFile,
"test-classloader-udf.jar",
GENERATED_UDF_CLASS,
GENERATED_UDF_CODE)
+
+ protected val testExtraConf: Map[String, String] = Map(
+ "flink.pipeline.name" -> "test-job")
}
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationLocalSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationLocalSuite.scala
index e4e6a5c67ea..0f4b38d36d7 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationLocalSuite.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationLocalSuite.scala
@@ -17,17 +17,35 @@
package org.apache.kyuubi.engine.flink.operation
+import java.util.UUID
+
import org.apache.kyuubi.config.KyuubiConf._
-import org.apache.kyuubi.engine.flink.WithFlinkSQLEngineLocal
+import org.apache.kyuubi.engine.ShareLevel
+import org.apache.kyuubi.engine.flink.{WithDiscoveryFlinkSQLEngine, WithFlinkSQLEngineLocal}
+import org.apache.kyuubi.ha.HighAvailabilityConf.{HA_ENGINE_REF_ID, HA_NAMESPACE}
import org.apache.kyuubi.operation.NoneMode
class FlinkOperationLocalSuite extends FlinkOperationSuite
- with WithFlinkSQLEngineLocal {
+ with WithDiscoveryFlinkSQLEngine with WithFlinkSQLEngineLocal {
+
+ protected def jdbcUrl: String = getFlinkEngineServiceUrl
+
+ override def withKyuubiConf: Map[String, String] = {
+ Map(
+ "flink.execution.target" -> "remote",
+ HA_NAMESPACE.key -> namespace,
+ HA_ENGINE_REF_ID.key -> engineRefId,
+ ENGINE_TYPE.key -> "FLINK_SQL",
+ ENGINE_SHARE_LEVEL.key -> shareLevel,
+ OPERATION_PLAN_ONLY_MODE.key -> NoneMode.name) ++ testExtraConf
+ }
+
+ override protected def engineRefId: String = UUID.randomUUID().toString
+
+ def namespace: String = "/kyuubi/flink-local-engine-test"
- override def withKyuubiConf: Map[String, String] =
- Map(OPERATION_PLAN_ONLY_MODE.key -> NoneMode.name)
+ def shareLevel: String = ShareLevel.USER.toString
- override protected def jdbcUrl: String =
- s"jdbc:hive2://${engine.frontendServices.head.connectionUrl}/;"
+ def engineType: String = "flink"
}
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationOnYarnSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationOnYarnSuite.scala
index b43e83db6cc..931d500f7c7 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationOnYarnSuite.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationOnYarnSuite.scala
@@ -17,10 +17,31 @@
package org.apache.kyuubi.engine.flink.operation
-import org.apache.kyuubi.engine.flink.WithDiscoveryFlinkSQLEngine
+import java.util.UUID
+
+import org.apache.kyuubi.config.KyuubiConf.{ENGINE_SHARE_LEVEL, ENGINE_TYPE}
+import org.apache.kyuubi.engine.ShareLevel
+import org.apache.kyuubi.engine.flink.{WithDiscoveryFlinkSQLEngine, WithFlinkSQLEngineOnYarn}
+import org.apache.kyuubi.ha.HighAvailabilityConf.{HA_ENGINE_REF_ID, HA_NAMESPACE}
class FlinkOperationOnYarnSuite extends FlinkOperationSuite
- with WithDiscoveryFlinkSQLEngine {
+ with WithDiscoveryFlinkSQLEngine with WithFlinkSQLEngineOnYarn {
protected def jdbcUrl: String = getFlinkEngineServiceUrl
+
+ override def withKyuubiConf: Map[String, String] = {
+ Map(
+ HA_NAMESPACE.key -> namespace,
+ HA_ENGINE_REF_ID.key -> engineRefId,
+ ENGINE_TYPE.key -> "FLINK_SQL",
+ ENGINE_SHARE_LEVEL.key -> shareLevel) ++ testExtraConf
+ }
+
+ override protected def engineRefId: String = UUID.randomUUID().toString
+
+ def namespace: String = "/kyuubi/flink-yarn-application-test"
+
+ def shareLevel: String = ShareLevel.USER.toString
+
+ def engineType: String = "flink"
}
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala
index 77ce3b3eebb..908e407a933 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala
@@ -23,12 +23,13 @@ import java.util.UUID
import scala.collection.JavaConverters._
import org.apache.flink.api.common.JobID
+import org.apache.flink.configuration.PipelineOptions
import org.apache.flink.table.types.logical.LogicalTypeRoot
import org.apache.hive.service.rpc.thrift._
import org.apache.kyuubi.Utils
import org.apache.kyuubi.config.KyuubiConf._
-import org.apache.kyuubi.engine.flink.WithFlinkTestResources
+import org.apache.kyuubi.engine.flink.{FlinkEngineUtils, WithFlinkTestResources}
import org.apache.kyuubi.engine.flink.result.Constants
import org.apache.kyuubi.engine.flink.util.TestUserClassLoaderJar
import org.apache.kyuubi.jdbc.hive.KyuubiStatement
@@ -758,7 +759,7 @@ abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTest
val metaData = resultSet.getMetaData
assert(metaData.getColumnType(1) === java.sql.Types.ARRAY)
assert(resultSet.next())
- val expected = "[v1,v2,v3]"
+ val expected = "[\"v1\",\"v2\",\"v3\"]"
assert(resultSet.getObject(1).toString == expected)
}
}
@@ -778,7 +779,7 @@ abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTest
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("select (1, '2', true)")
assert(resultSet.next())
- val expected = """{INT NOT NULL:1,CHAR(1) NOT NULL:2,BOOLEAN NOT NULL:true}"""
+ val expected = """{INT NOT NULL:1,CHAR(1) NOT NULL:"2",BOOLEAN NOT NULL:true}"""
assert(resultSet.getString(1) == expected)
val metaData = resultSet.getMetaData
assert(metaData.getColumnType(1) === java.sql.Types.STRUCT)
@@ -955,7 +956,7 @@ abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTest
statement.executeQuery("create table tbl_a (a int) with ('connector' = 'blackhole')")
val resultSet = statement.executeQuery("insert into tbl_a select 1")
val metadata = resultSet.getMetaData
- assert(metadata.getColumnName(1) === "result")
+ assert(metadata.getColumnName(1) === "job id")
assert(metadata.getColumnType(1) === java.sql.Types.VARCHAR)
assert(resultSet.next())
assert(resultSet.getString(1).length == 32)
@@ -973,7 +974,7 @@ abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTest
statement.executeQuery("create table tbl_b (a int) with ('connector' = 'blackhole')")
val resultSet = statement.executeQuery("insert into tbl_b select * from tbl_a")
val metadata = resultSet.getMetaData
- assert(metadata.getColumnName(1) === "result")
+ assert(metadata.getColumnName(1) === "job id")
assert(metadata.getColumnType(1) === java.sql.Types.VARCHAR)
assert(resultSet.next())
assert(resultSet.getString(1).length == 32)
@@ -984,11 +985,9 @@ abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTest
withMultipleConnectionJdbcStatement() { statement =>
val resultSet = statement.executeQuery("set table.dynamic-table-options.enabled = true")
val metadata = resultSet.getMetaData
- assert(metadata.getColumnName(1) == "key")
- assert(metadata.getColumnName(2) == "value")
+ assert(metadata.getColumnName(1) == "result")
assert(resultSet.next())
- assert(resultSet.getString(1) == "table.dynamic-table-options.enabled")
- assert(resultSet.getString(2) == "true")
+ assert(resultSet.getString(1) == "OK")
}
}
@@ -1003,16 +1002,17 @@ abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTest
}
test("execute statement - reset property") {
+ val originalName = "test-job" // defined in WithFlinkTestResource
withMultipleConnectionJdbcStatement() { statement =>
- statement.executeQuery("set pipeline.jars = my.jar")
- statement.executeQuery("reset pipeline.jars")
+ statement.executeQuery(s"set ${PipelineOptions.NAME.key()} = wrong-name")
+ statement.executeQuery(s"reset ${PipelineOptions.NAME.key()}")
val resultSet = statement.executeQuery("set")
      // Flink currently does not support SET statements with a key but no value,
// thus read all rows to find the desired one
var success = false
while (resultSet.next()) {
- if (resultSet.getString(1) == "pipeline.jars" &&
- !resultSet.getString(2).contains("my.jar")) {
+ if (resultSet.getString(1) == PipelineOptions.NAME.key() &&
+ resultSet.getString(2).equals(originalName)) {
success = true
}
}
@@ -1066,7 +1066,31 @@ abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTest
}
}
- test("execute statement - add/remove/show jar") {
+ test("execute statement - add/show jar") {
+ val jarName = s"newly-added-${UUID.randomUUID()}.jar"
+ val newJar = TestUserClassLoaderJar.createJarFile(
+ Utils.createTempDir("add-jar-test").toFile,
+ jarName,
+ GENERATED_UDF_CLASS,
+ GENERATED_UDF_CODE).toPath
+
+ withMultipleConnectionJdbcStatement()({ statement =>
+ statement.execute(s"add jar '$newJar'")
+
+ val showJarsResultAdded = statement.executeQuery("show jars")
+ var exists = false
+ while (showJarsResultAdded.next()) {
+ if (showJarsResultAdded.getString(1).contains(jarName)) {
+ exists = true
+ }
+ }
+ assert(exists)
+ })
+ }
+
+  // ignored because the Flink SQL gateway doesn't support REMOVE JAR statements
+ // see org.apache.flink.table.gateway.service.operation.OperationExecutor#callRemoveJar(..)
+ ignore("execute statement - remove jar") {
val jarName = s"newly-added-${UUID.randomUUID()}.jar"
val newJar = TestUserClassLoaderJar.createJarFile(
Utils.createTempDir("add-jar-test").toFile,
@@ -1136,9 +1160,12 @@ abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTest
assert(stmt.asInstanceOf[KyuubiStatement].getQueryId === null)
stmt.executeQuery("insert into tbl_a values (1)")
val queryId = stmt.asInstanceOf[KyuubiStatement].getQueryId
- assert(queryId !== null)
- // parse the string to check if it's valid Flink job id
- assert(JobID.fromHexString(queryId) !== null)
+ // Flink 1.16 doesn't support query id via ResultFetcher
+ if (FlinkEngineUtils.isFlinkVersionAtLeast("1.17")) {
+ assert(queryId !== null)
+ // parse the string to check if it's valid Flink job id
+ assert(JobID.fromHexString(queryId) !== null)
+ }
}
}
}
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala
index 1657f21f61d..17c49464fae 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/PlanOnlyOperationSuite.scala
@@ -18,21 +18,33 @@
package org.apache.kyuubi.engine.flink.operation
import java.sql.Statement
+import java.util.UUID
import org.apache.kyuubi.config.KyuubiConf
-import org.apache.kyuubi.engine.flink.WithFlinkSQLEngineLocal
+import org.apache.kyuubi.engine.flink.{WithDiscoveryFlinkSQLEngine, WithFlinkSQLEngineLocal}
+import org.apache.kyuubi.ha.HighAvailabilityConf.{HA_ENGINE_REF_ID, HA_NAMESPACE}
import org.apache.kyuubi.operation.{AnalyzeMode, ExecutionMode, HiveJDBCTestHelper, ParseMode, PhysicalMode}
-class PlanOnlyOperationSuite extends WithFlinkSQLEngineLocal with HiveJDBCTestHelper {
+class PlanOnlyOperationSuite extends WithFlinkSQLEngineLocal
+ with HiveJDBCTestHelper with WithDiscoveryFlinkSQLEngine {
+
+ override protected def engineRefId: String = UUID.randomUUID().toString
+
+ override protected def namespace: String = "/kyuubi/flink-plan-only-test"
+
+ def engineType: String = "flink"
override def withKyuubiConf: Map[String, String] =
Map(
+ "flink.execution.target" -> "remote",
+ HA_NAMESPACE.key -> namespace,
+ HA_ENGINE_REF_ID.key -> engineRefId,
+ KyuubiConf.ENGINE_TYPE.key -> "FLINK_SQL",
KyuubiConf.ENGINE_SHARE_LEVEL.key -> "user",
KyuubiConf.OPERATION_PLAN_ONLY_MODE.key -> ParseMode.name,
- KyuubiConf.ENGINE_SHARE_LEVEL_SUBDOMAIN.key -> "plan-only")
+ KyuubiConf.ENGINE_SHARE_LEVEL_SUBDOMAIN.key -> "plan-only") ++ testExtraConf
- override protected def jdbcUrl: String =
- s"jdbc:hive2://${engine.frontendServices.head.connectionUrl}/;"
+ override protected def jdbcUrl: String = getFlinkEngineServiceUrl
test("Plan only operation with system defaults") {
withJdbcStatement() { statement =>
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala
index 02aed2866d6..8c92b77efd0 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KyuubiApplicationManager.scala
@@ -176,7 +176,7 @@ object KyuubiApplicationManager {
// if the master is not identified ahead, add all tags
setupSparkYarnTag(applicationTag, conf)
setupSparkK8sTag(applicationTag, conf)
- case ("FLINK", _) =>
+ case ("FLINK", Some("YARN")) =>
// running flink on other platforms is not yet supported
setupFlinkYarnTag(applicationTag, conf)
      // other engine types run locally for now
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala
index 8642d87d7f6..d8d46e42791 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilder.scala
@@ -54,6 +54,9 @@ class FlinkProcessBuilder(
Paths.get(flinkHome, "bin", FLINK_EXEC_FILE).toFile.getCanonicalPath
}
+  // flink.execution.target is currently required in the Kyuubi conf
+ val executionTarget: Option[String] = conf.getOption("flink.execution.target")
+
override protected def module: String = "kyuubi-flink-sql-engine"
override protected def mainClass: String = "org.apache.kyuubi.engine.flink.FlinkSQLEngine"
@@ -63,13 +66,17 @@ class FlinkProcessBuilder(
"FLINK_CONF_DIR",
s"$flinkHome${File.separator}conf"))
- override def clusterManager(): Option[String] = Some("yarn")
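+  // only yarn-application deployments report a cluster manager; other targets run locally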
+ override def clusterManager(): Option[String] = {
+ executionTarget match {
+ case Some("yarn-application") => Some("yarn")
+ case _ => None
+ }
+ }
override protected val commands: Array[String] = {
KyuubiApplicationManager.tagApplication(engineRefId, shortName, clusterManager(), conf)
    // flink.execution.target is currently required in the Kyuubi conf
- val executionTarget = conf.getOption("flink.execution.target")
executionTarget match {
case Some("yarn-application") =>
val buffer = new ArrayBuffer[String]()
@@ -133,16 +140,16 @@ class FlinkProcessBuilder(
val classpathEntries = new java.util.LinkedHashSet[String]
// flink engine runtime jar
mainResource.foreach(classpathEntries.add)
- // flink sql client jar
- val flinkSqlClientPath = Paths.get(flinkHome)
+ // flink sql jars
+ Paths.get(flinkHome)
.resolve("opt")
.toFile
.listFiles(new FilenameFilter {
override def accept(dir: File, name: String): Boolean = {
- name.toLowerCase.startsWith("flink-sql-client")
+ name.toLowerCase.startsWith("flink-sql-client") ||
+ name.toLowerCase.startsWith("flink-sql-gateway")
}
- }).head.getAbsolutePath
- classpathEntries.add(flinkSqlClientPath)
+ }).sorted.foreach(jar => classpathEntries.add(jar.getAbsolutePath))
// jars from flink lib
classpathEntries.add(s"$flinkHome${File.separator}lib${File.separator}*")
diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala
index 53450b5897a..45272618df7 100644
--- a/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala
+++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/engine/flink/FlinkProcessBuilderSuite.scala
@@ -63,7 +63,7 @@ class FlinkProcessBuilderSuite extends KyuubiFunSuite {
private def envWithAllHadoop: ListMap[String, String] = envWithoutHadoopCLASSPATH +
(FLINK_HADOOP_CLASSPATH_KEY -> s"${File.separator}hadoop")
private def confStr: String = {
- sessionModeConf.clone.set("yarn.tags", "KYUUBI").getAll
+ sessionModeConf.clone.getAll
.map { case (k, v) => s"\\\\\\n\\t--conf $k=$v" }
.mkString(" ")
}
@@ -106,6 +106,7 @@ class FlinkProcessBuilderSuite extends KyuubiFunSuite {
val flinkHome = builder.flinkHome
classpathEntries.add(s"$flinkHome$flinkSqlClientJarPathSuffixRegex")
+ classpathEntries.add(s"$flinkHome$flinkSqlGatewayJarPathSuffixRegex")
classpathEntries.add(s"$flinkHome$flinkLibPathSuffixRegex")
classpathEntries.add(s"$flinkHome$flinkConfPathSuffix")
val envMap = builder.env
@@ -123,6 +124,8 @@ class FlinkProcessBuilderSuite extends KyuubiFunSuite {
private val javaPath = s"${envDefault("JAVA_HOME")}${File.separator}bin${File.separator}java"
private val flinkSqlClientJarPathSuffixRegex = s"${File.separator}opt${File.separator}" +
s"flink-sql-client-.*.jar"
+ private val flinkSqlGatewayJarPathSuffixRegex = s"${File.separator}opt${File.separator}" +
+ s"flink-sql-gateway-.*.jar"
private val flinkLibPathSuffixRegex = s"${File.separator}lib${File.separator}\\*"
private val flinkConfPathSuffix = s"${File.separator}conf"
private val mainClassStr = "org.apache.kyuubi.engine.flink.FlinkSQLEngine"
diff --git a/pom.xml b/pom.xml
index 520b181d52f..1feae0322a9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -135,7 +135,7 @@
2.4.40.9.30.62.2
-    <flink.version>1.16.1</flink.version>
+    <flink.version>1.17.0</flink.version>
     <flink.archive.name>flink-${flink.version}-bin-scala_${scala.binary.version}.tgz</flink.archive.name>
     <flink.archive.mirror>${apache.archive.dist}/flink/flink-${flink.version}</flink.archive.mirror>
     <flink.archive.download.skip>false</flink.archive.download.skip>
@@ -1624,6 +1624,12 @@
         <version>${flink.version}</version>
+      <dependency>
+        <groupId>org.apache.flink</groupId>
+        <artifactId>flink-sql-gateway</artifactId>
+        <version>${flink.version}</version>
+      </dependency>
+
       <dependency>
         <groupId>org.apache.flink</groupId>
         <artifactId>flink-test-utils</artifactId>
@@ -2390,16 +2396,16 @@
-      <id>flink-1.15</id>
+      <id>flink-1.16</id>
-        <flink.version>1.15.4</flink.version>
+        <flink.version>1.16.1</flink.version>
-      <id>flink-1.16</id>
+      <id>flink-1.17</id>
-        <flink.version>1.16.1</flink.version>
+        <flink.version>1.17.0</flink.version>
From 8d0b3ff50cd8a59733731663741c8d59dc8ef3c6 Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Mon, 17 Apr 2023 20:26:12 +0800
Subject: [PATCH 046/404] [KYUUBI #4387][DOCS][FOLLOWUP] Update Flink version
requirements
### _Why are the changes needed?_
Update docs to mention that Kyuubi requires Flink 1.15+ now
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4721 from pan3793/doc.
Closes #4387
4fdd9082b [Cheng Pan] [KYUUBI #4387][DOCS][FOLLOWUP] Update Flink version requirements
Authored-by: Cheng Pan
Signed-off-by: Cheng Pan
---
docs/quick_start/quick_start.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/quick_start/quick_start.rst b/docs/quick_start/quick_start.rst
index db564edb92c..fb1b12c2d8c 100644
--- a/docs/quick_start/quick_start.rst
+++ b/docs/quick_start/quick_start.rst
@@ -44,7 +44,7 @@ pre-installed and the `JAVA_HOME` is correctly set to each component.
Engine lib - Kyuubi Engine
Beeline - Kyuubi Hive Beeline
**Spark** Engine >=3.0.0 A Spark distribution
- **Flink** Engine >=1.14.0 A Flink distribution
+ **Flink** Engine >=1.15.0 A Flink distribution
**Trino** Engine >=363 A Trino cluster
**Doris** Engine N/A A Doris cluster
**Hive** Engine - 3.1.x - A Hive distribution
From b3dc4f8e3050b5341b34cb9f5f4f704d8b2c0fdf Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Mon, 17 Apr 2023 20:44:21 +0800
Subject: [PATCH 047/404] [KYUUBI #4722] [DOCS] Kyuubi requires Spark 3.1 and
above now
### _Why are the changes needed?_
Kyuubi has required Spark 3.1 and above since 1.7.0.
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4722 from pan3793/spark-3.1.
Closes #4722
e2b4c9ca2 [Cheng Pan] [DOCS] Kyuubi requires Spark 3.1 and above now
Authored-by: Cheng Pan
Signed-off-by: Cheng Pan
---
docs/quick_start/quick_start.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/quick_start/quick_start.rst b/docs/quick_start/quick_start.rst
index fb1b12c2d8c..388aaf21718 100644
--- a/docs/quick_start/quick_start.rst
+++ b/docs/quick_start/quick_start.rst
@@ -43,7 +43,7 @@ pre-installed and the `JAVA_HOME` is correctly set to each component.
**Kyuubi** Gateway \ |release| \ - Kyuubi Server
Engine lib - Kyuubi Engine
Beeline - Kyuubi Hive Beeline
- **Spark** Engine >=3.0.0 A Spark distribution
+ **Spark** Engine >=3.1 A Spark distribution
**Flink** Engine >=1.15.0 A Flink distribution
**Trino** Engine >=363 A Trino cluster
**Doris** Engine N/A A Doris cluster
From a4cdb26ca7f4ffd39e08b0c842b9759cd56eb6d7 Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Mon, 17 Apr 2023 23:02:18 +0800
Subject: [PATCH 048/404] [KYUUBI #4367][DOCS][FOLLOWUP] Kyuubi requires Flink
1.16/1.17
### _Why are the changes needed?_
Kyuubi requires Flink 1.16/1.17 now
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4723 from pan3793/flink-1.17.
Closes #4367
1e64dcbf1 [Cheng Pan] [KYUUBI #4367][DOCS][FOLLOWUP] Kyuubi requires Flink 1.16/1.17
Authored-by: Cheng Pan
Signed-off-by: Cheng Pan
---
docs/quick_start/quick_start.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/quick_start/quick_start.rst b/docs/quick_start/quick_start.rst
index 388aaf21718..2cf5f567fcb 100644
--- a/docs/quick_start/quick_start.rst
+++ b/docs/quick_start/quick_start.rst
@@ -44,7 +44,7 @@ pre-installed and the `JAVA_HOME` is correctly set to each component.
Engine lib - Kyuubi Engine
Beeline - Kyuubi Hive Beeline
**Spark** Engine >=3.1 A Spark distribution
- **Flink** Engine >=1.15.0 A Flink distribution
+ **Flink** Engine 1.16/1.17 A Flink distribution
**Trino** Engine >=363 A Trino cluster
**Doris** Engine N/A A Doris cluster
**Hive** Engine - 3.1.x - A Hive distribution
From a9a4766778d6894561efb32cdb1c3e73d6714b87 Mon Sep 17 00:00:00 2001
From: pengqli
Date: Mon, 17 Apr 2023 23:03:49 +0800
Subject: [PATCH 049/404] [KYUUBI #4698] [K8S][HELM] Centralize Kyuubi labels
definition
### _Why are the changes needed?_
The inline label blocks are the default implementation generated by `helm create <chart_name>`. The label info is relatively fixed and frequently used across templates, so centralizing it into common label definitions reduces redundancy and makes the YAML cleaner and easier to read.
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4698 from dev-lpq/helm_define_lables.
Closes #4698
7f60391c5 [pengqli] change the labels comment
5b246dd94 [pengqli] Merge branch 'master' into helm_define_lables
f480772f0 [pengqli] change the kyuubi labels name
e98d1d9a5 [pengqli] define Kyuubi labels template
Authored-by: pengqli
Signed-off-by: Cheng Pan
---
charts/kyuubi/templates/_helpers.tpl | 18 ++++++++++++++++++
charts/kyuubi/templates/kyuubi-configmap.yaml | 6 +-----
charts/kyuubi/templates/kyuubi-deployment.yaml | 12 +++---------
charts/kyuubi/templates/kyuubi-role.yaml | 6 +-----
.../kyuubi/templates/kyuubi-rolebinding.yaml | 6 +-----
charts/kyuubi/templates/kyuubi-service.yaml | 9 ++-------
.../templates/kyuubi-serviceaccount.yaml | 6 +-----
7 files changed, 27 insertions(+), 36 deletions(-)
diff --git a/charts/kyuubi/templates/_helpers.tpl b/charts/kyuubi/templates/_helpers.tpl
index 07e66d5f182..4c9da32b95e 100644
--- a/charts/kyuubi/templates/_helpers.tpl
+++ b/charts/kyuubi/templates/_helpers.tpl
@@ -31,3 +31,21 @@ For details, see 'kyuubi.frontend.protocols': https://kyuubi.readthedocs.io/en/m
{{- end }}
{{- $protocols | join "," }}
{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "kyuubi.selectorLabels" -}}
+app.kubernetes.io/name: {{ .Chart.Name }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "kyuubi.labels" -}}
+helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+{{ include "kyuubi.selectorLabels" . }}
+app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/charts/kyuubi/templates/kyuubi-configmap.yaml b/charts/kyuubi/templates/kyuubi-configmap.yaml
index 22d6562b8d8..3e7281083cb 100644
--- a/charts/kyuubi/templates/kyuubi-configmap.yaml
+++ b/charts/kyuubi/templates/kyuubi-configmap.yaml
@@ -20,11 +20,7 @@ kind: ConfigMap
metadata:
name: {{ .Release.Name }}
labels:
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
- app.kubernetes.io/name: {{ .Chart.Name }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
+ {{- include "kyuubi.labels" . | nindent 4 }}
data:
{{- with .Values.kyuubiConf.kyuubiEnv }}
kyuubi-env.sh: |
diff --git a/charts/kyuubi/templates/kyuubi-deployment.yaml b/charts/kyuubi/templates/kyuubi-deployment.yaml
index b30913dd09f..beca0998a08 100644
--- a/charts/kyuubi/templates/kyuubi-deployment.yaml
+++ b/charts/kyuubi/templates/kyuubi-deployment.yaml
@@ -20,22 +20,16 @@ kind: Deployment
metadata:
name: {{ .Release.Name }}
labels:
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
- app.kubernetes.io/name: {{ .Chart.Name }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
+ {{- include "kyuubi.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
- app.kubernetes.io/name: {{ .Chart.Name }}
- app.kubernetes.io/instance: {{ .Release.Name }}
+ {{- include "kyuubi.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
- app.kubernetes.io/name: {{ .Chart.Name }}
- app.kubernetes.io/instance: {{ .Release.Name }}
+ {{- include "kyuubi.selectorLabels" . | nindent 8 }}
annotations:
checksum/conf: {{ include (print $.Template.BasePath "/kyuubi-configmap.yaml") . | sha256sum }}
spec:
diff --git a/charts/kyuubi/templates/kyuubi-role.yaml b/charts/kyuubi/templates/kyuubi-role.yaml
index 7e0a810a1aa..5ee8c1dff5a 100644
--- a/charts/kyuubi/templates/kyuubi-role.yaml
+++ b/charts/kyuubi/templates/kyuubi-role.yaml
@@ -21,10 +21,6 @@ kind: Role
metadata:
name: {{ .Release.Name }}
labels:
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
- app.kubernetes.io/name: {{ .Chart.Name }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
+ {{- include "kyuubi.labels" . | nindent 4 }}
rules: {{- toYaml .Values.rbac.rules | nindent 2 }}
{{- end }}
diff --git a/charts/kyuubi/templates/kyuubi-rolebinding.yaml b/charts/kyuubi/templates/kyuubi-rolebinding.yaml
index e7dd0d64b9f..0f9dbd049c0 100644
--- a/charts/kyuubi/templates/kyuubi-rolebinding.yaml
+++ b/charts/kyuubi/templates/kyuubi-rolebinding.yaml
@@ -21,11 +21,7 @@ kind: RoleBinding
metadata:
name: {{ .Release.Name }}
labels:
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
- app.kubernetes.io/name: {{ .Chart.Name }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
+ {{- include "kyuubi.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.name | default .Release.Name }}
diff --git a/charts/kyuubi/templates/kyuubi-service.yaml b/charts/kyuubi/templates/kyuubi-service.yaml
index ddc2e230f4d..64c8b06ac20 100644
--- a/charts/kyuubi/templates/kyuubi-service.yaml
+++ b/charts/kyuubi/templates/kyuubi-service.yaml
@@ -22,11 +22,7 @@ kind: Service
metadata:
name: {{ $.Release.Name }}-{{ $name | kebabcase }}
labels:
- helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version }}
- app.kubernetes.io/name: {{ $.Chart.Name }}
- app.kubernetes.io/instance: {{ $.Release.Name }}
- app.kubernetes.io/version: {{ $.Values.image.tag | default $.Chart.AppVersion | quote }}
- app.kubernetes.io/managed-by: {{ $.Release.Service }}
+ {{- include "kyuubi.labels" $ | nindent 4 }}
{{- with $frontend.service.annotations }}
annotations: {{- toYaml . | nindent 4 }}
{{- end }}
@@ -40,8 +36,7 @@ spec:
nodePort: {{ $frontend.service.nodePort }}
{{- end }}
selector:
- app.kubernetes.io/name: {{ $.Chart.Name }}
- app.kubernetes.io/instance: {{ $.Release.Name }}
+ {{- include "kyuubi.selectorLabels" $ | nindent 4 }}
---
{{- end }}
{{- end }}
diff --git a/charts/kyuubi/templates/kyuubi-serviceaccount.yaml b/charts/kyuubi/templates/kyuubi-serviceaccount.yaml
index bbfa22e35f8..a8e282a1fba 100644
--- a/charts/kyuubi/templates/kyuubi-serviceaccount.yaml
+++ b/charts/kyuubi/templates/kyuubi-serviceaccount.yaml
@@ -21,9 +21,5 @@ kind: ServiceAccount
metadata:
name: {{ .Values.serviceAccount.name | default .Release.Name }}
labels:
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
- app.kubernetes.io/name: {{ .Chart.Name }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
+ {{- include "kyuubi.labels" . | nindent 4 }}
{{- end }}
From 258172fde921f572d343eb10710d098222c83a98 Mon Sep 17 00:00:00 2001
From: Deng An <36296995+packyan@users.noreply.github.com>
Date: Tue, 18 Apr 2023 00:31:47 +0800
Subject: [PATCH 050/404] Revert "[KYUUBI #4712] Bump Spark from 3.2.3 to
3.2.4"
This reverts commit 93ba8f762f35e5467dbb6cd51ef4e82ba2f74d05.
---
.github/workflows/master.yml | 2 +-
...plugin.spark.authz.serde.FunctionExtractor | 1 -
...in.spark.authz.serde.FunctionTypeExtractor | 1 -
.../src/main/resources/scan_command_spec.json | 29 +++
.../src/main/resources/scan_spec.json | 89 --------
.../main/resources/table_command_spec.json | 16 +-
.../spark/authz/PrivilegesBuilder.scala | 23 --
.../spark/authz/serde/CommandSpec.scala | 16 +-
.../authz/serde/functionExtractors.scala | 22 --
.../authz/serde/functionTypeExtractors.scala | 36 +---
.../plugin/spark/authz/serde/package.scala | 20 +-
.../FunctionPrivilegesBuilderSuite.scala | 196 ------------------
.../authz/gen/JsonSpecFileGenerator.scala | 9 +-
.../kyuubi/plugin/spark/authz/gen/Scans.scala | 28 +--
.../spark/authz/gen/TableCommands.scala | 2 +-
pom.xml | 2 +-
16 files changed, 56 insertions(+), 436 deletions(-)
create mode 100644 extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json
delete mode 100644 extensions/spark/kyuubi-spark-authz/src/main/resources/scan_spec.json
delete mode 100644 extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/FunctionPrivilegesBuilderSuite.scala
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index 04ecb1a601a..90d51fa959f 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -61,7 +61,7 @@ jobs:
comment: 'verify-on-spark-3.1-binary'
- java: 8
spark: '3.3'
- spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.2.4 -Dspark.archive.name=spark-3.2.4-bin-hadoop3.2.tgz'
+ spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.2.3 -Dspark.archive.name=spark-3.2.3-bin-hadoop3.2.tgz'
exclude-tags: '-Dmaven.plugin.scalatest.exclude.tags=org.scalatest.tags.Slow,org.apache.kyuubi.tags.DeltaTest,org.apache.kyuubi.tags.HudiTest,org.apache.kyuubi.tags.IcebergTest'
comment: 'verify-on-spark-3.2-binary'
env:
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor
index 2facb004a04..4686bb033cf 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor
+++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor
@@ -17,5 +17,4 @@
org.apache.kyuubi.plugin.spark.authz.serde.ExpressionInfoFunctionExtractor
org.apache.kyuubi.plugin.spark.authz.serde.FunctionIdentifierFunctionExtractor
-org.apache.kyuubi.plugin.spark.authz.serde.QualifiedNameStringFunctionExtractor
org.apache.kyuubi.plugin.spark.authz.serde.StringFunctionExtractor
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor
index 3bb0ee6c23e..475f47afc24 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor
+++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/META-INF/services/org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor
@@ -17,5 +17,4 @@
org.apache.kyuubi.plugin.spark.authz.serde.ExpressionInfoFunctionTypeExtractor
org.apache.kyuubi.plugin.spark.authz.serde.FunctionIdentifierFunctionTypeExtractor
-org.apache.kyuubi.plugin.spark.authz.serde.FunctionNameFunctionTypeExtractor
org.apache.kyuubi.plugin.spark.authz.serde.TempMarkerFunctionTypeExtractor
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json b/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json
new file mode 100644
index 00000000000..9a6aef4ed98
--- /dev/null
+++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_command_spec.json
@@ -0,0 +1,29 @@
+[ {
+ "classname" : "org.apache.kyuubi.plugin.spark.authz.util.PermanentViewMarker",
+ "scanDescs" : [ {
+ "fieldName" : "catalogTable",
+ "fieldExtractor" : "CatalogTableTableExtractor",
+ "catalogDesc" : null
+ } ]
+}, {
+ "classname" : "org.apache.spark.sql.catalyst.catalog.HiveTableRelation",
+ "scanDescs" : [ {
+ "fieldName" : "tableMeta",
+ "fieldExtractor" : "CatalogTableTableExtractor",
+ "catalogDesc" : null
+ } ]
+}, {
+ "classname" : "org.apache.spark.sql.execution.datasources.LogicalRelation",
+ "scanDescs" : [ {
+ "fieldName" : "catalogTable",
+ "fieldExtractor" : "CatalogTableOptionTableExtractor",
+ "catalogDesc" : null
+ } ]
+}, {
+ "classname" : "org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation",
+ "scanDescs" : [ {
+ "fieldName" : null,
+ "fieldExtractor" : "DataSourceV2RelationTableExtractor",
+ "catalogDesc" : null
+ } ]
+} ]
\ No newline at end of file
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_spec.json b/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_spec.json
deleted file mode 100644
index 3273ccbeaf0..00000000000
--- a/extensions/spark/kyuubi-spark-authz/src/main/resources/scan_spec.json
+++ /dev/null
@@ -1,89 +0,0 @@
-[ {
- "classname" : "org.apache.kyuubi.plugin.spark.authz.util.PermanentViewMarker",
- "scanDescs" : [ {
- "fieldName" : "catalogTable",
- "fieldExtractor" : "CatalogTableTableExtractor",
- "catalogDesc" : null
- } ],
- "functionDescs" : [ ]
-}, {
- "classname" : "org.apache.spark.sql.catalyst.catalog.HiveTableRelation",
- "scanDescs" : [ {
- "fieldName" : "tableMeta",
- "fieldExtractor" : "CatalogTableTableExtractor",
- "catalogDesc" : null
- } ],
- "functionDescs" : [ ]
-}, {
- "classname" : "org.apache.spark.sql.execution.datasources.LogicalRelation",
- "scanDescs" : [ {
- "fieldName" : "catalogTable",
- "fieldExtractor" : "CatalogTableOptionTableExtractor",
- "catalogDesc" : null
- } ],
- "functionDescs" : [ ]
-}, {
- "classname" : "org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation",
- "scanDescs" : [ {
- "fieldName" : null,
- "fieldExtractor" : "DataSourceV2RelationTableExtractor",
- "catalogDesc" : null
- } ],
- "functionDescs" : [ ]
-}, {
- "classname" : "org.apache.spark.sql.hive.HiveGenericUDF",
- "scanDescs" : [ ],
- "functionDescs" : [ {
- "fieldName" : "name",
- "fieldExtractor" : "QualifiedNameStringFunctionExtractor",
- "databaseDesc" : null,
- "functionTypeDesc" : {
- "fieldName" : "name",
- "fieldExtractor" : "FunctionNameFunctionTypeExtractor",
- "skipTypes" : [ "TEMP", "SYSTEM" ]
- },
- "isInput" : true
- } ]
-}, {
- "classname" : "org.apache.spark.sql.hive.HiveGenericUDTF",
- "scanDescs" : [ ],
- "functionDescs" : [ {
- "fieldName" : "name",
- "fieldExtractor" : "QualifiedNameStringFunctionExtractor",
- "databaseDesc" : null,
- "functionTypeDesc" : {
- "fieldName" : "name",
- "fieldExtractor" : "FunctionNameFunctionTypeExtractor",
- "skipTypes" : [ "TEMP", "SYSTEM" ]
- },
- "isInput" : true
- } ]
-}, {
- "classname" : "org.apache.spark.sql.hive.HiveSimpleUDF",
- "scanDescs" : [ ],
- "functionDescs" : [ {
- "fieldName" : "name",
- "fieldExtractor" : "QualifiedNameStringFunctionExtractor",
- "databaseDesc" : null,
- "functionTypeDesc" : {
- "fieldName" : "name",
- "fieldExtractor" : "FunctionNameFunctionTypeExtractor",
- "skipTypes" : [ "TEMP", "SYSTEM" ]
- },
- "isInput" : true
- } ]
-}, {
- "classname" : "org.apache.spark.sql.hive.HiveUDAFFunction",
- "scanDescs" : [ ],
- "functionDescs" : [ {
- "fieldName" : "name",
- "fieldExtractor" : "QualifiedNameStringFunctionExtractor",
- "databaseDesc" : null,
- "functionTypeDesc" : {
- "fieldName" : "name",
- "fieldExtractor" : "FunctionNameFunctionTypeExtractor",
- "skipTypes" : [ "TEMP", "SYSTEM" ]
- },
- "isInput" : true
- } ]
-} ]
\ No newline at end of file
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json b/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json
index 3d6fcd93ba7..81ccd8da085 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json
+++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json
@@ -1243,6 +1243,14 @@
"fieldName" : "query",
"fieldExtractor" : "LogicalPlanQueryExtractor"
} ]
+}, {
+ "classname" : "org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand",
+ "tableDescs" : [ ],
+ "opType" : "QUERY",
+ "queryDescs" : [ {
+ "fieldName" : "query",
+ "fieldExtractor" : "LogicalPlanQueryExtractor"
+ } ]
}, {
"classname" : "org.apache.spark.sql.execution.datasources.RefreshTable",
"tableDescs" : [ {
@@ -1285,14 +1293,6 @@
"fieldName" : "query",
"fieldExtractor" : "LogicalPlanQueryExtractor"
} ]
-}, {
- "classname" : "org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand",
- "tableDescs" : [ ],
- "opType" : "QUERY",
- "queryDescs" : [ {
- "fieldName" : "query",
- "fieldExtractor" : "LogicalPlanQueryExtractor"
- } ]
}, {
"classname" : "org.apache.spark.sql.hive.execution.InsertIntoHiveTable",
"tableDescs" : [ {
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala
index 98e4361894c..b8220ea2732 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilder.scala
@@ -235,27 +235,4 @@ object PrivilegesBuilder {
}
(inputObjs, outputObjs, opType)
}
-
- /**
- * Build input privilege objects from a Spark's LogicalPlan for hive permanent functions
- *
- * For `Command`s and other queries, build inputs.
- *
- * @param plan A Spark LogicalPlan
- */
- def buildFunctionPrivileges(
- plan: LogicalPlan,
- spark: SparkSession): PrivilegesAndOpType = {
- val inputObjs = new ArrayBuffer[PrivilegeObject]
- plan transformAllExpressions {
- case hiveFunction: Expression if isKnowFunction(hiveFunction) =>
- val functionSpec: ScanSpec = getFunctionSpec(hiveFunction)
- if (functionSpec.functionDescs.exists(!_.functionTypeDesc.get.skip(hiveFunction, spark))) {
- functionSpec.functions(hiveFunction).foreach(func =>
- inputObjs += PrivilegeObject(func))
- }
- hiveFunction
- }
- (inputObjs, Seq.empty, OperationType.QUERY)
- }
}
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala
index 32ad30e211f..e96ef8cbfd6 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/CommandSpec.scala
@@ -19,7 +19,6 @@ package org.apache.kyuubi.plugin.spark.authz.serde
import com.fasterxml.jackson.annotation.JsonIgnore
import org.apache.spark.sql.SparkSession
-import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.slf4j.LoggerFactory
@@ -95,8 +94,7 @@ case class TableCommandSpec(
case class ScanSpec(
classname: String,
- scanDescs: Seq[ScanDesc],
- functionDescs: Seq[FunctionDesc] = Seq.empty) extends CommandSpec {
+ scanDescs: Seq[ScanDesc]) extends CommandSpec {
override def opType: String = OperationType.QUERY.toString
def tables: (LogicalPlan, SparkSession) => Seq[Table] = (plan, spark) => {
scanDescs.flatMap { td =>
@@ -109,16 +107,4 @@ case class ScanSpec(
}
}
}
-
- def functions: (Expression) => Seq[Function] = (expr) => {
- functionDescs.flatMap { fd =>
- try {
- Some(fd.extract(expr))
- } catch {
- case e: Exception =>
- LOG.debug(fd.error(expr, e))
- None
- }
- }
- }
}
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala
index 72952120060..894a6cb8f2f 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionExtractors.scala
@@ -20,23 +20,12 @@ package org.apache.kyuubi.plugin.spark.authz.serde
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.expressions.ExpressionInfo
-import org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor.buildFunctionIdentFromQualifiedName
-
trait FunctionExtractor extends (AnyRef => Function) with Extractor
object FunctionExtractor {
val functionExtractors: Map[String, FunctionExtractor] = {
loadExtractorsToMap[FunctionExtractor]
}
-
- def buildFunctionIdentFromQualifiedName(qualifiedName: String): (String, Option[String]) = {
- val parts: Array[String] = qualifiedName.split("\\.", 2)
- if (parts.length == 1) {
- (qualifiedName, None)
- } else {
- (parts.last, Some(parts.head))
- }
- }
}
/**
@@ -48,17 +37,6 @@ class StringFunctionExtractor extends FunctionExtractor {
}
}
-/**
- * * String
- */
-class QualifiedNameStringFunctionExtractor extends FunctionExtractor {
- override def apply(v1: AnyRef): Function = {
- val qualifiedName: String = v1.asInstanceOf[String]
- val (funcName, database) = buildFunctionIdentFromQualifiedName(qualifiedName)
- Function(database, funcName)
- }
-}
-
/**
* org.apache.spark.sql.catalyst.FunctionIdentifier
*/
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala
index 193a00fa584..4c5e9dc8452 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/functionTypeExtractors.scala
@@ -19,11 +19,8 @@ package org.apache.kyuubi.plugin.spark.authz.serde
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.FunctionIdentifier
-import org.apache.spark.sql.catalyst.catalog.SessionCatalog
-import org.apache.kyuubi.plugin.spark.authz.serde.FunctionExtractor.buildFunctionIdentFromQualifiedName
import org.apache.kyuubi.plugin.spark.authz.serde.FunctionType.{FunctionType, PERMANENT, SYSTEM, TEMP}
-import org.apache.kyuubi.plugin.spark.authz.serde.FunctionTypeExtractor.getFunctionType
object FunctionType extends Enumeration {
type FunctionType = Value
@@ -36,17 +33,6 @@ object FunctionTypeExtractor {
val functionTypeExtractors: Map[String, FunctionTypeExtractor] = {
loadExtractorsToMap[FunctionTypeExtractor]
}
-
- def getFunctionType(fi: FunctionIdentifier, catalog: SessionCatalog): FunctionType = {
- fi match {
- case permanent if catalog.isPersistentFunction(permanent) =>
- PERMANENT
- case system if catalog.isRegisteredFunction(system) =>
- SYSTEM
- case _ =>
- TEMP
- }
- }
}
/**
@@ -80,18 +66,14 @@ class FunctionIdentifierFunctionTypeExtractor extends FunctionTypeExtractor {
override def apply(v1: AnyRef, spark: SparkSession): FunctionType = {
val catalog = spark.sessionState.catalog
val fi = v1.asInstanceOf[FunctionIdentifier]
- getFunctionType(fi, catalog)
- }
-}
-
-/**
- * String
- */
-class FunctionNameFunctionTypeExtractor extends FunctionTypeExtractor {
- override def apply(v1: AnyRef, spark: SparkSession): FunctionType = {
- val catalog: SessionCatalog = spark.sessionState.catalog
- val qualifiedName: String = v1.asInstanceOf[String]
- val (funcName, database) = buildFunctionIdentFromQualifiedName(qualifiedName)
- getFunctionType(FunctionIdentifier(funcName, database), catalog)
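+    // check temporary functions first, then persistent, then registered (built-in); fall back to TEMP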
+ if (catalog.isTemporaryFunction(fi)) {
+ TEMP
+ } else if (catalog.isPersistentFunction(fi)) {
+ PERMANENT
+ } else if (catalog.isRegisteredFunction(fi)) {
+ SYSTEM
+ } else {
+ TEMP
+ }
}
}
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala
index 07f91a95d99..a52a558a00a 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/main/scala/org/apache/kyuubi/plugin/spark/authz/serde/package.scala
@@ -66,10 +66,9 @@ package object serde {
}
final private lazy val SCAN_SPECS: Map[String, ScanSpec] = {
- val is = getClass.getClassLoader.getResourceAsStream("scan_spec.json")
+ val is = getClass.getClassLoader.getResourceAsStream("scan_command_spec.json")
mapper.readValue(is, new TypeReference[Array[ScanSpec]] {})
- .map(e => (e.classname, e))
- .filter(t => t._2.scanDescs.nonEmpty).toMap
+ .map(e => (e.classname, e)).toMap
}
def isKnownScan(r: AnyRef): Boolean = {
@@ -80,21 +79,6 @@ package object serde {
SCAN_SPECS(r.getClass.getName)
}
- final private lazy val FUNCTION_SPECS: Map[String, ScanSpec] = {
- val is = getClass.getClassLoader.getResourceAsStream("scan_spec.json")
- mapper.readValue(is, new TypeReference[Array[ScanSpec]] {})
- .map(e => (e.classname, e))
- .filter(t => t._2.functionDescs.nonEmpty).toMap
- }
-
- def isKnowFunction(r: AnyRef): Boolean = {
- FUNCTION_SPECS.contains(r.getClass.getName)
- }
-
- def getFunctionSpec(r: AnyRef): ScanSpec = {
- FUNCTION_SPECS(r.getClass.getName)
- }
-
def operationType(plan: LogicalPlan): OperationType = {
val classname = plan.getClass.getName
TABLE_COMMAND_SPECS.get(classname)
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/FunctionPrivilegesBuilderSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/FunctionPrivilegesBuilderSuite.scala
deleted file mode 100644
index e8da4e87168..00000000000
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/FunctionPrivilegesBuilderSuite.scala
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.kyuubi.plugin.spark.authz
-
-import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
-import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
-// scalastyle:off
-import org.scalatest.funsuite.AnyFunSuite
-
-import org.apache.kyuubi.plugin.spark.authz.OperationType.QUERY
-import org.apache.kyuubi.plugin.spark.authz.ranger.AccessType
-
-abstract class FunctionPrivilegesBuilderSuite extends AnyFunSuite
- with SparkSessionProvider with BeforeAndAfterAll with BeforeAndAfterEach {
- // scalastyle:on
-
- protected def withTable(t: String)(f: String => Unit): Unit = {
- try {
- f(t)
- } finally {
- sql(s"DROP TABLE IF EXISTS $t")
- }
- }
-
- protected def withDatabase(t: String)(f: String => Unit): Unit = {
- try {
- f(t)
- } finally {
- sql(s"DROP DATABASE IF EXISTS $t")
- }
- }
-
- protected def checkColumns(plan: LogicalPlan, cols: Seq[String]): Unit = {
- val (in, out, _) = PrivilegesBuilder.build(plan, spark)
- assert(out.isEmpty, "Queries shall not check output privileges")
- val po = in.head
- assert(po.actionType === PrivilegeObjectActionType.OTHER)
- assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
- assert(po.columns === cols)
- }
-
- protected def checkColumns(query: String, cols: Seq[String]): Unit = {
- checkColumns(sql(query).queryExecution.optimizedPlan, cols)
- }
-
- protected val reusedDb: String = getClass.getSimpleName
- protected val reusedDb2: String = getClass.getSimpleName + "2"
- protected val reusedTable: String = reusedDb + "." + getClass.getSimpleName
- protected val reusedTableShort: String = reusedTable.split("\\.").last
- protected val reusedPartTable: String = reusedTable + "_part"
- protected val reusedPartTableShort: String = reusedPartTable.split("\\.").last
- protected val functionCount = 3
- protected val functionNamePrefix = "kyuubi_fun_"
- protected val tempFunNamePrefix = "kyuubi_temp_fun_"
-
- override def beforeAll(): Unit = {
- sql(s"CREATE DATABASE IF NOT EXISTS $reusedDb")
- sql(s"CREATE DATABASE IF NOT EXISTS $reusedDb2")
- sql(s"CREATE TABLE IF NOT EXISTS $reusedTable" +
- s" (key int, value string) USING parquet")
- sql(s"CREATE TABLE IF NOT EXISTS $reusedPartTable" +
- s" (key int, value string, pid string) USING parquet" +
- s" PARTITIONED BY(pid)")
- // scalastyle:off
- (0 until functionCount).foreach { index =>
- {
- sql(s"CREATE FUNCTION ${reusedDb}.${functionNamePrefix}${index} AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFMaskHash'")
- sql(s"CREATE FUNCTION ${reusedDb2}.${functionNamePrefix}${index} AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFMaskHash'")
- sql(s"CREATE TEMPORARY FUNCTION ${tempFunNamePrefix}${index} AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFMaskHash'")
- }
- }
- sql(s"USE ${reusedDb2}")
- // scalastyle:on
- super.beforeAll()
- }
-
- override def afterAll(): Unit = {
- Seq(reusedTable, reusedPartTable).foreach { t =>
- sql(s"DROP TABLE IF EXISTS $t")
- }
-
- Seq(reusedDb, reusedDb2).foreach { db =>
- (0 until functionCount).foreach { index =>
- sql(s"DROP FUNCTION ${db}.${functionNamePrefix}${index}")
- }
- sql(s"DROP DATABASE IF EXISTS ${db}")
- }
-
- spark.stop()
- super.afterAll()
- }
-}
-
-class HiveFunctionPrivilegesBuilderSuite extends FunctionPrivilegesBuilderSuite {
-
- override protected val catalogImpl: String = "hive"
-
- test("Function Call Query") {
- val plan = sql(s"SELECT kyuubi_fun_1('data'), " +
- s"kyuubi_fun_2(value), " +
- s"${reusedDb}.kyuubi_fun_0(value), " +
- s"kyuubi_temp_fun_1('data2')," +
- s"kyuubi_temp_fun_2(key) " +
- s"FROM $reusedTable").queryExecution.analyzed
- val (inputs, _, _) = PrivilegesBuilder.buildFunctionPrivileges(plan, spark)
- assert(inputs.size === 3)
- inputs.foreach { po =>
- assert(po.actionType === PrivilegeObjectActionType.OTHER)
- assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION)
- assert(po.dbname startsWith reusedDb.toLowerCase)
- assert(po.objectName startsWith functionNamePrefix.toLowerCase)
- val accessType = ranger.AccessType(po, QUERY, isInput = true)
- assert(accessType === AccessType.SELECT)
- }
- }
-
- test("Function Call Query with Quoted Name") {
- val plan = sql(s"SELECT `kyuubi_fun_1`('data'), " +
- s"`kyuubi_fun_2`(value), " +
- s"`${reusedDb}`.`kyuubi_fun_0`(value), " +
- s"`kyuubi_temp_fun_1`('data2')," +
- s"`kyuubi_temp_fun_2`(key) " +
- s"FROM $reusedTable").queryExecution.analyzed
- val (inputs, _, _) = PrivilegesBuilder.buildFunctionPrivileges(plan, spark)
- assert(inputs.size === 3)
- inputs.foreach { po =>
- assert(po.actionType === PrivilegeObjectActionType.OTHER)
- assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION)
- assert(po.dbname startsWith reusedDb.toLowerCase)
- assert(po.objectName startsWith functionNamePrefix.toLowerCase)
- val accessType = ranger.AccessType(po, QUERY, isInput = true)
- assert(accessType === AccessType.SELECT)
- }
- }
-
- test("Simple Function Call Query") {
- val plan = sql(s"SELECT kyuubi_fun_1('data'), " +
- s"kyuubi_fun_0('value'), " +
- s"${reusedDb}.kyuubi_fun_0('value'), " +
- s"${reusedDb}.kyuubi_fun_2('value'), " +
- s"kyuubi_temp_fun_1('data2')," +
- s"kyuubi_temp_fun_2('key') ").queryExecution.analyzed
- val (inputs, _, _) = PrivilegesBuilder.buildFunctionPrivileges(plan, spark)
- assert(inputs.size === 4)
- inputs.foreach { po =>
- assert(po.actionType === PrivilegeObjectActionType.OTHER)
- assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION)
- assert(po.dbname startsWith reusedDb.toLowerCase)
- assert(po.objectName startsWith functionNamePrefix.toLowerCase)
- val accessType = ranger.AccessType(po, QUERY, isInput = true)
- assert(accessType === AccessType.SELECT)
- }
- }
-
- test("Function Call In CAST Command") {
- val table = "castTable"
- withTable(table) { table =>
- val plan = sql(s"CREATE TABLE ${table} " +
- s"SELECT kyuubi_fun_1('data') col1, " +
- s"${reusedDb2}.kyuubi_fun_2(value) col2, " +
- s"kyuubi_fun_0(value) col3, " +
- s"kyuubi_fun_2('value') col4, " +
- s"${reusedDb}.kyuubi_fun_2('value') col5, " +
- s"${reusedDb}.kyuubi_fun_1('value') col6, " +
- s"kyuubi_temp_fun_1('data2') col7, " +
- s"kyuubi_temp_fun_2(key) col8 " +
- s"FROM ${reusedTable} WHERE ${reusedDb2}.kyuubi_fun_1(key)='123'").queryExecution.analyzed
- val (inputs, _, _) = PrivilegesBuilder.buildFunctionPrivileges(plan, spark)
- assert(inputs.size === 7)
- inputs.foreach { po =>
- assert(po.actionType === PrivilegeObjectActionType.OTHER)
- assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION)
- assert(po.dbname startsWith reusedDb.toLowerCase)
- assert(po.objectName startsWith functionNamePrefix.toLowerCase)
- val accessType = ranger.AccessType(po, QUERY, isInput = true)
- assert(accessType === AccessType.SELECT)
- }
- }
- }
-
-}
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala
index e20cd13d7b4..7c7ed138b27 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala
@@ -34,16 +34,13 @@ object JsonSpecFileGenerator {
writeCommandSpecJson("database", DatabaseCommands.data)
writeCommandSpecJson("table", TableCommands.data ++ IcebergCommands.data)
writeCommandSpecJson("function", FunctionCommands.data)
- writeCommandSpecJson("scan", Scans.data, isScanResource = true)
+ writeCommandSpecJson("scan", Scans.data)
}
- def writeCommandSpecJson[T <: CommandSpec](
- commandType: String,
- specArr: Array[T],
- isScanResource: Boolean = false): Unit = {
+ def writeCommandSpecJson[T <: CommandSpec](commandType: String, specArr: Array[T]): Unit = {
val pluginHome = getClass.getProtectionDomain.getCodeSource.getLocation.getPath
.split("target").head
- val filename = s"${commandType}${if (isScanResource) "" else "_command"}_spec.json"
+ val filename = s"${commandType}_command_spec.json"
val writer = {
val p = Paths.get(pluginHome, "src", "main", "resources", filename)
Files.newBufferedWriter(p, StandardCharsets.UTF_8)
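With the `isScanResource` special case removed, every category, including scans, resolves to the same suffix. A tiny self-contained sketch of the naming scheme after this change:
```scala
// Sketch of the unified naming: scan specs move from scan_spec.json to
// scan_command_spec.json, matching the database/table/function specs.
object SpecFileNameSketch extends App {
  def specFileName(commandType: String): String = s"${commandType}_command_spec.json"
  Seq("database", "table", "function", "scan").foreach(t => println(specFileName(t)))
}
```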
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala
index b2c1868a26d..7bd8260bba5 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/Scans.scala
@@ -18,7 +18,6 @@
package org.apache.kyuubi.plugin.spark.authz.gen
import org.apache.kyuubi.plugin.spark.authz.serde._
-import org.apache.kyuubi.plugin.spark.authz.serde.FunctionType._
object Scans {
@@ -58,34 +57,9 @@ object Scans {
ScanSpec(r, Seq(tableDesc))
}
- val HiveSimpleUDF = {
- ScanSpec(
- "org.apache.spark.sql.hive.HiveSimpleUDF",
- Seq.empty,
- Seq(FunctionDesc(
- "name",
- classOf[QualifiedNameStringFunctionExtractor],
- functionTypeDesc = Some(FunctionTypeDesc(
- "name",
- classOf[FunctionNameFunctionTypeExtractor],
- Seq(TEMP, SYSTEM))),
- isInput = true)))
- }
-
- val HiveGenericUDF = HiveSimpleUDF.copy(classname = "org.apache.spark.sql.hive.HiveGenericUDF")
-
- val HiveUDAFFunction = HiveSimpleUDF.copy(classname =
- "org.apache.spark.sql.hive.HiveUDAFFunction")
-
- val HiveGenericUDTF = HiveSimpleUDF.copy(classname = "org.apache.spark.sql.hive.HiveGenericUDTF")
-
val data: Array[ScanSpec] = Array(
HiveTableRelation,
LogicalRelation,
DataSourceV2Relation,
- PermanentViewMarker,
- HiveSimpleUDF,
- HiveGenericUDF,
- HiveUDAFFunction,
- HiveGenericUDTF)
+ PermanentViewMarker)
}
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala
index 4f971ba6201..7bf01b43f89 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala
@@ -637,7 +637,7 @@ object TableCommands {
"org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand"),
InsertIntoHadoopFsRelationCommand,
InsertIntoDataSourceDir.copy(classname =
- "org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand"),
+ "org.apache.spark.sql.execution.datasources.InsertIntoDataSourceDirCommand"),
InsertIntoHiveTable,
LoadData,
MergeIntoTable,
diff --git a/pom.xml b/pom.xml
index 1feae0322a9..8c3f71976d7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2343,7 +2343,7 @@
         <module>extensions/spark/kyuubi-extension-spark-3-2</module>
       </modules>
       <properties>
-        <spark.version>3.2.4</spark.version>
+        <spark.version>3.2.3</spark.version>
         <spark.binary.version>3.2</spark.binary.version>
         <delta.version>2.0.2</delta.version>
         <spark.archive.name>spark-${spark.version}-bin-hadoop3.2.tgz</spark.archive.name>
From 8c7b457d88c97fafd5a4108a282192500ea7cf4e Mon Sep 17 00:00:00 2001
From: Anurag Rajawat
Date: Mon, 17 Apr 2023 09:14:20 +0800
Subject: [PATCH 051/404] [KYUUBI #4712] Bump Spark from 3.2.3 to 3.2.4
### _Why are the changes needed?_
Fixes #4712
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4718 from anurag-rajawat/upgrade-spark.
Closes #4712
79dcf1b79 [Anurag Rajawat] Bump Spark from 3.2.3 to 3.2.4
Authored-by: Anurag Rajawat
Signed-off-by: liangbowen
---
.github/workflows/master.yml | 2 +-
pom.xml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index 90d51fa959f..04ecb1a601a 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -61,7 +61,7 @@ jobs:
comment: 'verify-on-spark-3.1-binary'
- java: 8
spark: '3.3'
- spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.2.3 -Dspark.archive.name=spark-3.2.3-bin-hadoop3.2.tgz'
+ spark-archive: '-Dspark.archive.mirror=https://archive.apache.org/dist/spark/spark-3.2.4 -Dspark.archive.name=spark-3.2.4-bin-hadoop3.2.tgz'
exclude-tags: '-Dmaven.plugin.scalatest.exclude.tags=org.scalatest.tags.Slow,org.apache.kyuubi.tags.DeltaTest,org.apache.kyuubi.tags.HudiTest,org.apache.kyuubi.tags.IcebergTest'
comment: 'verify-on-spark-3.2-binary'
env:
diff --git a/pom.xml b/pom.xml
index 8c3f71976d7..1feae0322a9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2343,7 +2343,7 @@
         <module>extensions/spark/kyuubi-extension-spark-3-2</module>
       </modules>
       <properties>
-        <spark.version>3.2.3</spark.version>
+        <spark.version>3.2.4</spark.version>
         <spark.binary.version>3.2</spark.binary.version>
         <delta.version>2.0.2</delta.version>
         <spark.archive.name>spark-${spark.version}-bin-hadoop3.2.tgz</spark.archive.name>
From f6331a2a0fc0650ba0970d58f90a7a7c8e908095 Mon Sep 17 00:00:00 2001
From: zwangsheng <2213335496@qq.com>
Date: Tue, 18 Apr 2023 13:44:23 +0800
Subject: [PATCH 052/404] [KYUUBI #3653][REST] AdminResource: add list Kyuubi
server API
### _Why are the changes needed?_
Add a list Kyuubi servers API to `AdminResource`; a sample invocation is sketched below.
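For illustration, a hypothetical call to the new endpoint. The host, port, and user are placeholders (10099 is the default REST frontend port), and the caller must be a Kyuubi administrator authenticated via HTTP basic auth:
```shell
# Hypothetical invocation; 'admin:' encodes an admin user with an empty
# password, mirroring the basic-auth header built in the test suite below.
curl -s -u 'admin:' http://localhost:10099/api/v1/admin/server
# Expected shape: a JSON array of ServerData entries, each carrying nodeName,
# namespace, instance, host, port, attributes, and a "Running" status.
```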
### _How was this patch tested?_
- [x] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4670 from zwangsheng/KYUUBI_3653.
Closes #3653
b91a6c617 [zwangsheng] fix
4271d0fd0 [zwangsheng] fix comments
e14f8cd55 [zwangsheng] [KYUUBI #3653][REST] AdminResource add list server api
Authored-by: zwangsheng <2213335496@qq.com>
Signed-off-by: fwang12
---
.../kyuubi/client/api/v1/dto/ServerData.java | 129 ++++++++++++++++++
.../apache/kyuubi/server/api/ApiUtils.scala | 14 +-
.../kyuubi/server/api/v1/AdminResource.scala | 31 ++++-
.../server/api/v1/AdminResourceSuite.scala | 104 +++++++-------
4 files changed, 219 insertions(+), 59 deletions(-)
create mode 100644 kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/ServerData.java
diff --git a/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/ServerData.java b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/ServerData.java
new file mode 100644
index 00000000000..d64d43a72af
--- /dev/null
+++ b/kyuubi-rest-client/src/main/java/org/apache/kyuubi/client/api/v1/dto/ServerData.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.client.api.v1.dto;
+
+import java.util.Map;
+import java.util.Objects;
+
+public class ServerData {
+ private String nodeName;
+ private String namespace;
+ private String instance;
+ private String host;
+ private int port;
+ private Map<String, String> attributes;
+ private String status;
+
+ public ServerData(
+ String nodeName,
+ String namespace,
+ String instance,
+ String host,
+ int port,
+ Map<String, String> attributes,
+ String status) {
+ this.nodeName = nodeName;
+ this.namespace = namespace;
+ this.instance = instance;
+ this.host = host;
+ this.port = port;
+ this.attributes = attributes;
+ this.status = status;
+ }
+
+ public String getNodeName() {
+ return nodeName;
+ }
+
+ public ServerData setNodeName(String nodeName) {
+ this.nodeName = nodeName;
+ return this;
+ }
+
+ public String getNamespace() {
+ return namespace;
+ }
+
+ public ServerData setNamespace(String namespace) {
+ this.namespace = namespace;
+ return this;
+ }
+
+ public String getInstance() {
+ return instance;
+ }
+
+ public ServerData setInstance(String instance) {
+ this.instance = instance;
+ return this;
+ }
+
+ public String getHost() {
+ return host;
+ }
+
+ public ServerData setHost(String host) {
+ this.host = host;
+ return this;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public ServerData setPort(int port) {
+ this.port = port;
+ return this;
+ }
+
+ public Map<String, String> getAttributes() {
+ return attributes;
+ }
+
+ public ServerData setAttributes(Map<String, String> attributes) {
+ this.attributes = attributes;
+ return this;
+ }
+
+ public String getStatus() {
+ return status;
+ }
+
+ public ServerData setStatus(String status) {
+ this.status = status;
+ return this;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(nodeName, namespace, instance, port, attributes, status);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (obj == null || getClass() != obj.getClass()) return false;
+ ServerData server = (ServerData) obj;
+ return port == server.port
+ && Objects.equals(nodeName, server.nodeName)
+ && Objects.equals(namespace, server.namespace)
+ && Objects.equals(instance, server.instance)
+ && Objects.equals(host, server.host)
+ && Objects.equals(status, server.status);
+ }
+}
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/ApiUtils.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/ApiUtils.scala
index ebbf04c9073..1f2cb309b80 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/ApiUtils.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/ApiUtils.scala
@@ -20,8 +20,9 @@ package org.apache.kyuubi.server.api
import scala.collection.JavaConverters._
import org.apache.kyuubi.Utils
-import org.apache.kyuubi.client.api.v1.dto.{OperationData, SessionData}
+import org.apache.kyuubi.client.api.v1.dto.{OperationData, ServerData, SessionData}
import org.apache.kyuubi.events.KyuubiOperationEvent
+import org.apache.kyuubi.ha.client.ServiceNodeInfo
import org.apache.kyuubi.operation.KyuubiOperation
import org.apache.kyuubi.session.KyuubiSession
@@ -58,4 +59,15 @@ object ApiUtils {
opEvent.sessionType,
operation.getSession.asInstanceOf[KyuubiSession].connectionUrl)
}
+
+ def serverData(nodeInfo: ServiceNodeInfo): ServerData = {
+ new ServerData(
+ nodeInfo.nodeName,
+ nodeInfo.namespace,
+ nodeInfo.instance,
+ nodeInfo.host,
+ nodeInfo.port,
+ nodeInfo.attributes.asJava,
+ "Running")
+ }
}
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/AdminResource.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/AdminResource.scala
index 0d8b31b2c65..113660a41c3 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/AdminResource.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/AdminResource.scala
@@ -31,7 +31,7 @@ import org.apache.commons.lang3.StringUtils
import org.apache.zookeeper.KeeperException.NoNodeException
import org.apache.kyuubi.{KYUUBI_VERSION, Logging, Utils}
-import org.apache.kyuubi.client.api.v1.dto.{Engine, OperationData, SessionData}
+import org.apache.kyuubi.client.api.v1.dto.{Engine, OperationData, ServerData, SessionData}
import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.config.KyuubiConf._
import org.apache.kyuubi.ha.HighAvailabilityConf.HA_NAMESPACE
@@ -296,6 +296,35 @@ private[v1] class AdminResource extends ApiRequestContext with Logging {
node.attributes.asJava))
}
+ @ApiResponse(
+ responseCode = "200",
+ content = Array(
+ new Content(
+ mediaType = MediaType.APPLICATION_JSON,
+ array = new ArraySchema(schema = new Schema(implementation =
+ classOf[ServerData])))),
+ description = "list all live kyuubi servers")
+ @GET
+ @Path("server")
+ def listServers(): Seq[ServerData] = {
+ val userName = fe.getSessionUser(Map.empty[String, String])
+ val ipAddress = fe.getIpAddress
+ info(s"Received list all live kyuubi servers request from $userName/$ipAddress")
+ if (!isAdministrator(userName)) {
+ throw new NotAllowedException(
+ s"$userName is not allowed to list all live kyuubi servers")
+ }
+ val kyuubiConf = fe.getConf
+ val servers = ListBuffer[ServerData]()
+ val serverSpec = DiscoveryPaths.makePath(null, kyuubiConf.get(HA_NAMESPACE))
+ withDiscoveryClient(kyuubiConf) { discoveryClient =>
+ discoveryClient.getServiceNodesInfo(serverSpec).map(nodeInfo => {
+ servers += ApiUtils.serverData(nodeInfo)
+ })
+ }
+ servers
+ }
+
private def getEngine(
userName: String,
engineType: String,
diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/AdminResourceSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/AdminResourceSuite.scala
index a10994d7ea5..b7650627eee 100644
--- a/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/AdminResourceSuite.scala
+++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/server/api/v1/AdminResourceSuite.scala
@@ -17,6 +17,7 @@
package org.apache.kyuubi.server.api.v1
+import java.nio.charset.StandardCharsets
import java.util.{Base64, UUID}
import javax.ws.rs.client.Entity
import javax.ws.rs.core.{GenericType, MediaType}
@@ -24,19 +25,22 @@ import javax.ws.rs.core.{GenericType, MediaType}
import scala.collection.JavaConverters._
import org.apache.hive.service.rpc.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V2
+import org.mockito.Mockito.lenient
import org.scalatest.time.SpanSugar.convertIntToGrainOfTime
+import org.scalatestplus.mockito.MockitoSugar.mock
import org.apache.kyuubi.{KYUUBI_VERSION, KyuubiFunSuite, RestFrontendTestHelper, Utils}
-import org.apache.kyuubi.client.api.v1.dto.{Engine, OperationData, SessionData, SessionHandle, SessionOpenRequest}
+import org.apache.kyuubi.client.api.v1.dto.{Engine, OperationData, ServerData, SessionData, SessionHandle, SessionOpenRequest}
import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_CONNECTION_URL_KEY
import org.apache.kyuubi.engine.{ApplicationState, EngineRef, KyuubiApplicationManager}
import org.apache.kyuubi.engine.EngineType.SPARK_SQL
import org.apache.kyuubi.engine.ShareLevel.{CONNECTION, USER}
import org.apache.kyuubi.ha.HighAvailabilityConf
+import org.apache.kyuubi.ha.client.{DiscoveryPaths, ServiceDiscovery}
import org.apache.kyuubi.ha.client.DiscoveryClientProvider.withDiscoveryClient
-import org.apache.kyuubi.ha.client.DiscoveryPaths
import org.apache.kyuubi.plugin.PluginLoader
+import org.apache.kyuubi.server.KyuubiRestFrontendService
import org.apache.kyuubi.server.http.authentication.AuthenticationHandler.AUTHORIZATION_HEADER
class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
@@ -46,6 +50,13 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
override protected lazy val conf: KyuubiConf = KyuubiConf()
.set(KyuubiConf.SERVER_ADMINISTRATORS, Seq("admin001"))
+ private val encodeAuthorization: String = {
+ new String(
+ Base64.getEncoder.encode(
+ s"${Utils.currentUser}:".getBytes()),
+ StandardCharsets.UTF_8)
+ }
+
override def beforeAll(): Unit = {
super.beforeAll()
engineMgr.initialize(KyuubiConf())
@@ -63,11 +74,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
.post(null)
assert(405 == response.getStatus)
- val adminUser = Utils.currentUser
- val encodeAuthorization = new String(
- Base64.getEncoder.encode(
- s"$adminUser:".getBytes()),
- "UTF-8")
response = webTarget.path("api/v1/admin/refresh/hadoop_conf")
.request()
.header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization")
@@ -76,7 +82,7 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
val admin001AuthHeader = new String(
Base64.getEncoder.encode("admin001".getBytes()),
- "UTF-8")
+ StandardCharsets.UTF_8)
response = webTarget.path("api/v1/admin/refresh/hadoop_conf")
.request()
.header(AUTHORIZATION_HEADER, s"BASIC $admin001AuthHeader")
@@ -85,7 +91,7 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
val admin002AuthHeader = new String(
Base64.getEncoder.encode("admin002".getBytes()),
- "UTF-8")
+ StandardCharsets.UTF_8)
response = webTarget.path("api/v1/admin/refresh/hadoop_conf")
.request()
.header(AUTHORIZATION_HEADER, s"BASIC $admin002AuthHeader")
@@ -99,11 +105,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
.post(null)
assert(405 == response.getStatus)
- val adminUser = Utils.currentUser
- val encodeAuthorization = new String(
- Base64.getEncoder.encode(
- s"$adminUser:".getBytes()),
- "UTF-8")
response = webTarget.path("api/v1/admin/refresh/user_defaults_conf")
.request()
.header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization")
@@ -117,11 +118,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
.post(null)
assert(405 == response.getStatus)
- val adminUser = Utils.currentUser
- val encodeAuthorization = new String(
- Base64.getEncoder.encode(
- s"$adminUser:".getBytes()),
- "UTF-8")
response = webTarget.path("api/v1/admin/refresh/unlimited_users")
.request()
.header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization")
@@ -136,12 +132,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
.request(MediaType.APPLICATION_JSON_TYPE)
.post(Entity.entity(requestObj, MediaType.APPLICATION_JSON_TYPE))
- val adminUser = Utils.currentUser
- val encodeAuthorization = new String(
- Base64.getEncoder.encode(
- s"$adminUser:".getBytes()),
- "UTF-8")
-
// get session list
var response2 = webTarget.path("api/v1/admin/sessions").request()
.header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization")
@@ -196,12 +186,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
"localhost",
Map("testConfig" -> "testValue"))
- val adminUser = Utils.currentUser
- val encodeAuthorization = new String(
- Base64.getEncoder.encode(
- s"$adminUser:".getBytes()),
- "UTF-8")
-
// list sessions
var response = webTarget.path("api/v1/admin/sessions")
.queryParam("users", "admin")
@@ -249,12 +233,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
Map("testConfig" -> "testValue"))
val operation = fe.be.getCatalogs(sessionHandle)
- val adminUser = Utils.currentUser
- val encodeAuthorization = new String(
- Base64.getEncoder.encode(
- s"$adminUser:".getBytes()),
- "UTF-8")
-
// list operations
var response = webTarget.path("api/v1/admin/operations").request()
.header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization")
@@ -301,11 +279,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
assert(client.pathExists(engineSpace))
assert(client.getChildren(engineSpace).size == 1)
- val adminUser = Utils.currentUser
- val encodeAuthorization = new String(
- Base64.getEncoder.encode(
- s"$adminUser:".getBytes()),
- "UTF-8")
val response = webTarget.path("api/v1/admin/engine")
.queryParam("sharelevel", "USER")
.queryParam("type", "spark_sql")
@@ -349,11 +322,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
assert(client.pathExists(engineSpace))
assert(client.getChildren(engineSpace).size == 1)
- val adminUser = Utils.currentUser
- val encodeAuthorization = new String(
- Base64.getEncoder.encode(
- s"$adminUser:".getBytes()),
- "UTF-8")
val response = webTarget.path("api/v1/admin/engine")
.queryParam("sharelevel", "connection")
.queryParam("type", "spark_sql")
@@ -389,11 +357,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
assert(client.pathExists(engineSpace))
assert(client.getChildren(engineSpace).size == 1)
- val adminUser = Utils.currentUser
- val encodeAuthorization = new String(
- Base64.getEncoder.encode(
- s"$adminUser:".getBytes()),
- "UTF-8")
val response = webTarget.path("api/v1/admin/engine")
.queryParam("type", "spark_sql")
.request(MediaType.APPLICATION_JSON_TYPE)
@@ -453,11 +416,6 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
assert(client.pathExists(engineSpace1))
assert(client.pathExists(engineSpace2))
- val adminUser = Utils.currentUser
- val encodeAuthorization = new String(
- Base64.getEncoder.encode(
- s"$adminUser:".getBytes()),
- "UTF-8")
val response = webTarget.path("api/v1/admin/engine")
.queryParam("type", "spark_sql")
.request(MediaType.APPLICATION_JSON_TYPE)
@@ -488,4 +446,36 @@ class AdminResourceSuite extends KyuubiFunSuite with RestFrontendTestHelper {
}
}
}
+
+ test("list server") {
+ // Mock Kyuubi Server
+ val serverDiscovery = mock[ServiceDiscovery]
+ lenient.when(serverDiscovery.fe).thenReturn(fe)
+ val namespace = conf.get(HighAvailabilityConf.HA_NAMESPACE)
+ withDiscoveryClient(conf) { client =>
+ client.registerService(conf, namespace, serverDiscovery)
+
+ val response = webTarget.path("api/v1/admin/server")
+ .request()
+ .header(AUTHORIZATION_HEADER, s"BASIC $encodeAuthorization")
+ .get
+
+ assert(200 == response.getStatus)
+ val result = response.readEntity(new GenericType[Seq[ServerData]]() {})
+ assert(result.size == 1)
+ val testServer = result.head
+ val export = fe.asInstanceOf[KyuubiRestFrontendService]
+
+ assert(namespace.equals(testServer.getNamespace.replaceFirst("/", "")))
+ assert(export.host.equals(testServer.getHost))
+ assert(export.connectionUrl.equals(testServer.getInstance()))
+ assert(!testServer.getAttributes.isEmpty)
+ val attributes = testServer.getAttributes
+ assert(attributes.containsKey("serviceUri") &&
+ attributes.get("serviceUri").equals(fe.connectionUrl))
+ assert(attributes.containsKey("version"))
+ assert(attributes.containsKey("sequence"))
+ assert("Running".equals(testServer.getStatus))
+ }
+ }
}
From 4581920a31dc5a66b379a2737c14d129b2165397 Mon Sep 17 00:00:00 2001
From: Deng An
Date: Tue, 18 Apr 2023 23:48:21 +0800
Subject: [PATCH 053/404] [KYUUBI #4716] [KYUUBI #4715] [AUTHZ] Fix the
incorrect class name of InsertIntoHiveDirCommand in table spec generator
### _Why are the changes needed?_
to close #4715: `TableCommands.scala` used the wrong class name (`InsertIntoDataSourceDirCommand`) for the `InsertIntoHiveDirCommand` spec, leaving the generator out of sync with the checked-in `table_command_spec.json`
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4716 from packyan/improve_prevent_edit_auto_generated_files.
Closes #4716
b6fff8fe7 [Deng An] fix the inconsistency in the spec json file
Authored-by: Deng An
Signed-off-by: liangbowen
---
.../src/main/resources/table_command_spec.json | 16 ++++++++--------
.../plugin/spark/authz/gen/TableCommands.scala | 2 +-
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json b/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json
index 81ccd8da085..3d6fcd93ba7 100644
--- a/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json
+++ b/extensions/spark/kyuubi-spark-authz/src/main/resources/table_command_spec.json
@@ -1243,14 +1243,6 @@
"fieldName" : "query",
"fieldExtractor" : "LogicalPlanQueryExtractor"
} ]
-}, {
- "classname" : "org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand",
- "tableDescs" : [ ],
- "opType" : "QUERY",
- "queryDescs" : [ {
- "fieldName" : "query",
- "fieldExtractor" : "LogicalPlanQueryExtractor"
- } ]
}, {
"classname" : "org.apache.spark.sql.execution.datasources.RefreshTable",
"tableDescs" : [ {
@@ -1293,6 +1285,14 @@
"fieldName" : "query",
"fieldExtractor" : "LogicalPlanQueryExtractor"
} ]
+}, {
+ "classname" : "org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand",
+ "tableDescs" : [ ],
+ "opType" : "QUERY",
+ "queryDescs" : [ {
+ "fieldName" : "query",
+ "fieldExtractor" : "LogicalPlanQueryExtractor"
+ } ]
}, {
"classname" : "org.apache.spark.sql.hive.execution.InsertIntoHiveTable",
"tableDescs" : [ {
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala
index 7bf01b43f89..4f971ba6201 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/TableCommands.scala
@@ -637,7 +637,7 @@ object TableCommands {
"org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand"),
InsertIntoHadoopFsRelationCommand,
InsertIntoDataSourceDir.copy(classname =
- "org.apache.spark.sql.execution.datasources.InsertIntoDataSourceDirCommand"),
+ "org.apache.spark.sql.hive.execution.InsertIntoHiveDirCommand"),
InsertIntoHiveTable,
LoadData,
MergeIntoTable,
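For context, `table_command_spec.json` is produced by the generator in the authz test sources; one hypothetical way to regenerate it after a fix like this (the exec-plugin invocation is an assumption, not a documented Kyuubi workflow):
```shell
# Hypothetical regeneration via the Maven exec plugin; assumes the generator
# object exposes a main entry point on the test classpath.
build/mvn -pl :kyuubi-spark-authz_2.12 -am test-compile exec:java \
  -Dexec.classpathScope=test \
  -Dexec.mainClass=org.apache.kyuubi.plugin.spark.authz.gen.JsonSpecFileGenerator
```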
From 2c2f6f9e93cad8ec436f6a658efadfe275cb0073 Mon Sep 17 00:00:00 2001
From: liangbowen
Date: Wed, 19 Apr 2023 15:48:09 +0800
Subject: [PATCH 054/404] [KYUUBI #4734] [Docs] Fix typo in docs of custom
event handler
### _Why are the changes needed?_
- fix the typo in the dependency reference in the custom event handler doc
![image](https://user-images.githubusercontent.com/1935105/233002453-76b04fc1-72a2-4171-b39f-05e558970c4f.png)
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [x] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4734 from bowenliang123/fix-event-doc.
Closes #4734
470c6d1a8 [Bowen Liang] Update docs/extensions/server/events.rst
afbe163b3 [liangbowen] use the `release` directive
1a5542f54 [liangbowen] fix doc of custom event handler
Lead-authored-by: liangbowen
Co-authored-by: Bowen Liang
Signed-off-by: liangbowen
---
docs/extensions/server/events.rst | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docs/extensions/server/events.rst b/docs/extensions/server/events.rst
index 832c1e5df55..aee7d4899d2 100644
--- a/docs/extensions/server/events.rst
+++ b/docs/extensions/server/events.rst
@@ -51,12 +51,12 @@ To create custom EventHandlerProvider class derived from the above interface, we
- Referencing the library
-.. code-block:: xml
+.. parsed-literal::
    <dependency>
       <groupId>org.apache.kyuubi</groupId>
-      <artifactId>kyuubi-event_2.12</artifactId>
-      <version>1.7.0-incubating</version>
+      <artifactId>kyuubi-events_2.12</artifactId>
+      <version>\ |release|\ </version>
       <scope>provided</scope>
    </dependency>
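For readers following this doc, a minimal sketch of a handler provider built against that artifact. The trait shapes below are assumptions inferred from the surrounding `events.rst` text, not verbatim Kyuubi API:
```scala
// A minimal sketch, assuming an SPI like the one events.rst describes; the
// exact trait names and packages in kyuubi-events_2.12 may differ.
import org.apache.kyuubi.config.KyuubiConf

trait KyuubiEvent { def toJson: String }            // assumed shape
trait EventHandler[T] { def apply(event: T): Unit } // assumed shape

class StdoutEventHandler extends EventHandler[KyuubiEvent] {
  override def apply(event: KyuubiEvent): Unit = println(event.toJson)
}

class StdoutEventHandlerProvider {
  // assumed provider hook: build a handler from the server configuration
  def create(conf: KyuubiConf): EventHandler[KyuubiEvent] = new StdoutEventHandler
}
```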
From 609018a6b2f5480f7e7c5fd20e314ceaf2dc83b9 Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Wed, 19 Apr 2023 17:48:14 +0800
Subject: [PATCH 055/404] [KYUUBI #4727] [DOC] kyuubi-spark-lineage has no
transitive deps
### _Why are the changes needed?_
Update the outdated docs: the lineage plugin jar has no transitive dependencies to ship, and the build commands need Maven's `-am` (`--also-make`) flag so the required upstream modules are built as well.
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4727 from pan3793/lineage-doc.
Closes #4727
b6843b282 [Cheng Pan] [DOC] kyuubi-spark-lineage has no transitive deps
Authored-by: Cheng Pan
Signed-off-by: odone
---
docs/extensions/engines/spark/lineage.md | 5 ++---
extensions/spark/kyuubi-spark-lineage/README.md | 2 +-
2 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/docs/extensions/engines/spark/lineage.md b/docs/extensions/engines/spark/lineage.md
index cd38be4ba12..665929e9f2d 100644
--- a/docs/extensions/engines/spark/lineage.md
+++ b/docs/extensions/engines/spark/lineage.md
@@ -101,13 +101,12 @@ Kyuubi Spark Lineage Listener Extension is built using [Apache Maven](https://ma
To build it, `cd` to the root direct of kyuubi project and run:
```shell
-build/mvn clean package -pl :kyuubi-spark-lineage_2.12 -DskipTests
+build/mvn clean package -pl :kyuubi-spark-lineage_2.12 -am -DskipTests
```
After a while, if everything goes well, you will get the plugin finally in two parts:
- The main plugin jar, which is under `./extensions/spark/kyuubi-spark-lineage/target/kyuubi-spark-lineage_${scala.binary.version}-${project.version}.jar`
-- The least transitive dependencies needed, which are under `./extensions/spark/kyuubi-spark-lineage/target/scala-${scala.binary.version}/jars`
### Build against Different Apache Spark Versions
@@ -118,7 +117,7 @@ Sometimes, it may be incompatible with other Spark distributions, then you may n
For example,
```shell
-build/mvn clean package -pl :kyuubi-spark-lineage_2.12 -DskipTests -Dspark.version=3.1.2
+build/mvn clean package -pl :kyuubi-spark-lineage_2.12 -am -DskipTests -Dspark.version=3.1.2
```
The available `spark.version`s are shown in the following table.
diff --git a/extensions/spark/kyuubi-spark-lineage/README.md b/extensions/spark/kyuubi-spark-lineage/README.md
index 34f2733b4f6..5365f2d778c 100644
--- a/extensions/spark/kyuubi-spark-lineage/README.md
+++ b/extensions/spark/kyuubi-spark-lineage/README.md
@@ -26,7 +26,7 @@
## Build
```shell
-build/mvn clean package -pl :kyuubi-spark-lineage_2.12 -Dspark.version=3.2.1
+build/mvn clean package -pl :kyuubi-spark-lineage_2.12 -am -Dspark.version=3.2.1
```
### Supported Apache Spark Versions
From 2ed0990b7304ca5986ab116e02f7508e3d732549 Mon Sep 17 00:00:00 2001
From: liangbowen
Date: Wed, 19 Apr 2023 18:06:06 +0800
Subject: [PATCH 056/404] [KYUUBI #4676] [AUTHZ] Reuse users and namespaces in
both tests and policy file generation
### _Why are the changes needed?_
- align the lists of users and namespaces used in tests with those used in policy file generation, since users and namespaces are the most important elements of a Ranger policy's conditions and resources.
- simplify decisions in Authz testing, give a clear view of exactly what is tested and authorized, and make every usage easy to trace in an IDE (see the sketch below).
- reduce possible misuse and untraceable uses of authorized and unauthorized users, rules, and resources. (We had up to 4 unauthorized users in separate tests!)
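To illustrate the refactoring, suites replace repeated string literals with the shared constants introduced in the new `RangerTestResources.scala` below:
```scala
// Sketch of a suite fragment after this patch; doAs/sql/po come from the
// surrounding test helpers and are shown here only for illustration.
import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._
import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._

// before: doAs("admin", sql(s"DROP TABLE IF EXISTS $t"))
doAs(admin, sql(s"DROP TABLE IF EXISTS $t"))

// before: assert(po.dbname === "default")
assert(po.dbname === defaultDb)
```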
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4676 from bowenliang123/authz-gen-common.
Closes #4676
dc535a4d8 [liangbowen] authz-gen-common
Authored-by: liangbowen
Signed-off-by: liangbowen
---
.../authz/gen/PolicyJsonFileGenerator.scala | 20 +-
.../spark/authz/gen/RangerGenWrapper.scala | 2 +-
.../spark/authz/PrivilegesBuilderSuite.scala | 40 +--
.../spark/authz/RangerTestResources.scala | 45 +++
.../spark/authz/SparkSessionProvider.scala | 12 +-
.../authz/V2CommandsPrivilegesSuite.scala | 9 +-
...bergCatalogRangerSparkExtensionSuite.scala | 46 ++--
.../ranger/RangerSparkExtensionSuite.scala | 260 +++++++++---------
.../ranger/SparkRangerAdminPluginSuite.scala | 10 +-
...ableCatalogRangerSparkExtensionSuite.scala | 60 ++--
.../datamasking/DataMaskingTestBase.scala | 75 ++---
.../rowfiltering/RowFilteringTestBase.scala | 37 +--
12 files changed, 329 insertions(+), 287 deletions(-)
create mode 100644 extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/RangerTestResources.scala
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/PolicyJsonFileGenerator.scala b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/PolicyJsonFileGenerator.scala
index 8dbc802b81b..e53d771973a 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/PolicyJsonFileGenerator.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/PolicyJsonFileGenerator.scala
@@ -31,7 +31,9 @@ import org.apache.ranger.plugin.model.RangerPolicy
// scalastyle:off
import org.scalatest.funsuite.AnyFunSuite
-import org.apache.kyuubi.plugin.spark.authz.gen.KRangerPolicyItemAccess._
+import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._
+import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._
+import org.apache.kyuubi.plugin.spark.authz.gen.KRangerPolicyItemAccess.allowTypes
import org.apache.kyuubi.plugin.spark.authz.gen.KRangerPolicyResource._
import org.apache.kyuubi.plugin.spark.authz.gen.RangerAccessType._
import org.apache.kyuubi.plugin.spark.authz.gen.RangerClassConversions._
@@ -127,22 +129,6 @@ class PolicyJsonFileGenerator extends AnyFunSuite {
}
}
- // users
- private val admin = "admin"
- private val bob = "bob"
- private val kent = "kent"
- private val permViewUser = "perm_view_user"
- private val ownerPlaceHolder = "{OWNER}"
- private val createOnlyUser = "create_only_user"
- private val defaultTableOwner = "default_table_owner"
- private val permViewOnlyUser = "user_perm_view_only"
-
- // db
- private val defaultDb = "default"
- private val sparkCatalog = "spark_catalog"
- private val icebergNamespace = "iceberg_ns"
- private val namespace1 = "ns1"
-
// resources
private val allDatabaseRes = databaseRes("*")
private val allTableRes = tableRes("*")
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/RangerGenWrapper.scala b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/RangerGenWrapper.scala
index 14405f81698..71bce375972 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/RangerGenWrapper.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/gen/scala/org/apache/kyuubi/plugin/spark/authz/gen/RangerGenWrapper.scala
@@ -22,7 +22,7 @@ import scala.language.implicitConversions
import org.apache.ranger.plugin.model.RangerPolicy
import org.apache.ranger.plugin.model.RangerPolicy._
-import org.apache.kyuubi.plugin.spark.authz.gen.RangerClassConversions.getRangerObject
+import org.apache.kyuubi.plugin.spark.authz.gen.RangerClassConversions._
trait RangerObjectGenerator[T] {
def get: T
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilderSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilderSuite.scala
index e9483eb34ba..af4a7c2623a 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilderSuite.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/PrivilegesBuilderSuite.scala
@@ -30,6 +30,8 @@ import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.funsuite.AnyFunSuite
import org.apache.kyuubi.plugin.spark.authz.OperationType._
+import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._
+import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._
import org.apache.kyuubi.plugin.spark.authz.ranger.AccessType
import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils
import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils.isSparkVersionAtMost
@@ -122,8 +124,8 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE)
assert(po.catalog.isEmpty)
- assert(po.dbname === "default")
- assert(po.objectName === "default")
+ assert(po.dbname === defaultDb)
+ assert(po.objectName === defaultDb)
assert(po.columns.isEmpty)
}
@@ -365,7 +367,7 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
assert(po.catalog.isEmpty)
- assert(po.dbname === (if (isSparkV2) null else "default"))
+ assert(po.dbname === (if (isSparkV2) null else defaultDb))
assert(po.objectName === "AlterViewAsCommand")
checkTableOwner(po)
assert(po.columns.isEmpty)
@@ -521,7 +523,7 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
assert(po.catalog.isEmpty)
- assert(po.dbname === (if (isSparkV2) null else "default"))
+ assert(po.dbname === (if (isSparkV2) null else defaultDb))
assert(po.objectName === "CreateViewCommand")
assert(po.columns.isEmpty)
val accessType = ranger.AccessType(po, operationType, isInput = false)
@@ -541,7 +543,7 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
assert(po.catalog.isEmpty)
- assert(po.dbname === (if (isSparkV2) null else "default"))
+ assert(po.dbname === (if (isSparkV2) null else defaultDb))
assert(po.objectName === tableName)
assert(po.columns.isEmpty)
val accessType = ranger.AccessType(po, operationType, isInput = false)
@@ -588,7 +590,7 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION)
assert(po.catalog.isEmpty)
- val db = if (isSparkV33OrGreater) "default" else null
+ val db = if (isSparkV33OrGreater) defaultDb else null
assert(po.dbname === db)
assert(po.objectName === "CreateFunctionCommand")
assert(po.columns.isEmpty)
@@ -620,7 +622,7 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION)
assert(po.catalog.isEmpty)
- val db = if (isSparkV33OrGreater) "default" else null
+ val db = if (isSparkV33OrGreater) defaultDb else null
assert(po.dbname === db)
assert(po.objectName === "DropFunctionCommand")
assert(po.columns.isEmpty)
@@ -641,7 +643,7 @@ abstract class PrivilegesBuilderSuite extends AnyFunSuite
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.FUNCTION)
assert(po.catalog.isEmpty)
- val db = if (isSparkV33OrGreater) "default" else null
+ val db = if (isSparkV33OrGreater) defaultDb else null
assert(po.dbname === db)
assert(po.objectName === "RefreshFunctionCommand")
assert(po.columns.isEmpty)
@@ -1267,8 +1269,8 @@ class InMemoryPrivilegeBuilderSuite extends PrivilegesBuilderSuite {
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE)
assert(po.catalog.isEmpty)
- assert(po.dbname === "default")
- assert(po.objectName === "default")
+ assert(po.dbname === defaultDb)
+ assert(po.objectName === defaultDb)
assert(po.columns.isEmpty)
val accessType = ranger.AccessType(po, operationType, isInput = false)
assert(accessType === AccessType.ALTER)
@@ -1296,7 +1298,7 @@ class InMemoryPrivilegeBuilderSuite extends PrivilegesBuilderSuite {
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
assert(po.catalog.isEmpty)
- assert(po.dbname === (if (isSparkV2) null else "default"))
+ assert(po.dbname === (if (isSparkV2) null else defaultDb))
assert(po.objectName === "CreateDataSourceTableAsSelectCommand")
if (catalogImpl == "hive") {
assert(po.columns === Seq("key", "value"))
@@ -1328,7 +1330,7 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite {
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
assert(po.catalog.isEmpty)
- assert(po.dbname === "default")
+ assert(po.dbname === defaultDb)
assert(po.objectName === t)
assert(po.columns.head === "pid")
checkTableOwner(po)
@@ -1350,7 +1352,7 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite {
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
assert(po.catalog.isEmpty)
- assert(po.dbname === "default")
+ assert(po.dbname === defaultDb)
assert(po.objectName === "CreateTableCommand")
assert(po.columns.isEmpty)
val accessType = ranger.AccessType(po, operationType, isInput = false)
@@ -1382,7 +1384,7 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite {
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
assert(po.catalog.isEmpty)
- assert(po.dbname === "default")
+ assert(po.dbname === defaultDb)
assert(po.objectName === "CreateHiveTableAsSelectCommand")
assert(po.columns === Seq("key", "value"))
val accessType = ranger.AccessType(po, operationType, isInput = false)
@@ -1493,7 +1495,7 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite {
assert(po.actionType === PrivilegeObjectActionType.INSERT)
assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
assert(po.catalog.isEmpty)
- assert(po.dbname equalsIgnoreCase "default")
+ assert(po.dbname equalsIgnoreCase defaultDb)
assert(po.objectName equalsIgnoreCase tableName)
assert(po.columns.isEmpty)
checkTableOwner(po)
@@ -1536,7 +1538,7 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite {
assert(po.actionType === PrivilegeObjectActionType.INSERT)
assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
assert(po.catalog.isEmpty)
- assert(po.dbname equalsIgnoreCase "default")
+ assert(po.dbname equalsIgnoreCase defaultDb)
assert(po.objectName equalsIgnoreCase tableName)
assert(po.columns === Seq("a", "b"))
checkTableOwner(po)
@@ -1618,7 +1620,7 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite {
assert(po.actionType === PrivilegeObjectActionType.INSERT)
assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
assert(po.catalog.isEmpty)
- assert(po.dbname equalsIgnoreCase "default")
+ assert(po.dbname equalsIgnoreCase defaultDb)
assert(po.objectName equalsIgnoreCase tableName)
assert(po.columns === Seq("a", "b"))
checkTableOwner(po)
@@ -1639,7 +1641,7 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite {
val po0 = in.head
assert(po0.actionType === PrivilegeObjectActionType.OTHER)
assert(po0.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
- assert(po0.dbname === "default")
+ assert(po0.dbname === defaultDb)
assert(po0.objectName === t)
assert(po0.columns.isEmpty)
checkTableOwner(po0)
@@ -1665,7 +1667,7 @@ class HiveCatalogPrivilegeBuilderSuite extends PrivilegesBuilderSuite {
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.TABLE_OR_VIEW)
assert(po.catalog.isEmpty)
- assert(po.dbname === "default")
+ assert(po.dbname === defaultDb)
assert(po.objectName === "OptimizedCreateHiveTableAsSelectCommand")
assert(po.columns === Seq("a"))
val accessType = ranger.AccessType(po, operationType, isInput = false)
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/RangerTestResources.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/RangerTestResources.scala
new file mode 100644
index 00000000000..2297f73f9c4
--- /dev/null
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/RangerTestResources.scala
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.plugin.spark.authz
+
+object RangerTestUsers {
+ // authorized users used in policy generation
+ val admin = "admin"
+ val alice = "alice"
+ val bob = "bob"
+ val kent = "kent"
+ val permViewUser = "perm_view_user"
+ val ownerPlaceHolder = "{OWNER}"
+ val createOnlyUser = "create_only_user"
+ val defaultTableOwner = "default_table_owner"
+ val permViewOnlyUser = "user_perm_view_only"
+
+ // non-authorized users
+ val invisibleUser = "i_am_invisible"
+ val denyUser = "denyuser"
+ val denyUser2 = "denyuser2"
+ val someone = "someone"
+}
+
+object RangerTestNamespace {
+ val defaultDb = "default"
+ val sparkCatalog = "spark_catalog"
+ val icebergNamespace = "iceberg_ns"
+ val namespace1 = "ns1"
+ val namespace2 = "ns2"
+}
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/SparkSessionProvider.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/SparkSessionProvider.scala
index ce8d6bc0ccf..6b1087930d3 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/SparkSessionProvider.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/SparkSessionProvider.scala
@@ -26,6 +26,7 @@ import org.apache.spark.sql.{DataFrame, Row, SparkSession, SparkSessionExtension
import org.scalatest.Assertions.convertToEqualizer
import org.apache.kyuubi.Utils
+import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._
import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils._
trait SparkSessionProvider {
@@ -39,7 +40,6 @@ trait SparkSessionProvider {
protected val extension: SparkSessionExtensions => Unit = _ => Unit
protected val sqlExtensions: String = ""
- protected val defaultTableOwner = "default_table_owner"
protected val extraSparkConf: SparkConf = new SparkConf()
protected lazy val spark: SparkSession = {
@@ -83,12 +83,12 @@ trait SparkSessionProvider {
f
} finally {
res.foreach {
- case (t, "table") => doAs("admin", sql(s"DROP TABLE IF EXISTS $t"))
- case (db, "database") => doAs("admin", sql(s"DROP DATABASE IF EXISTS $db"))
- case (fn, "function") => doAs("admin", sql(s"DROP FUNCTION IF EXISTS $fn"))
- case (view, "view") => doAs("admin", sql(s"DROP VIEW IF EXISTS $view"))
+ case (t, "table") => doAs(admin, sql(s"DROP TABLE IF EXISTS $t"))
+ case (db, "database") => doAs(admin, sql(s"DROP DATABASE IF EXISTS $db"))
+ case (fn, "function") => doAs(admin, sql(s"DROP FUNCTION IF EXISTS $fn"))
+ case (view, "view") => doAs(admin, sql(s"DROP VIEW IF EXISTS $view"))
case (cacheTable, "cache") => if (isSparkV32OrGreater) {
- doAs("admin", sql(s"UNCACHE TABLE IF EXISTS $cacheTable"))
+ doAs(admin, sql(s"UNCACHE TABLE IF EXISTS $cacheTable"))
}
case (_, e) =>
throw new RuntimeException(s"the resource whose resource type is $e cannot be cleared")
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2CommandsPrivilegesSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2CommandsPrivilegesSuite.scala
index dede8142693..0ad6b3fead8 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2CommandsPrivilegesSuite.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/V2CommandsPrivilegesSuite.scala
@@ -23,6 +23,7 @@ import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.sql.execution.QueryExecution
import org.apache.kyuubi.plugin.spark.authz.OperationType._
+import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._
import org.apache.kyuubi.plugin.spark.authz.ranger.AccessType
import org.apache.kyuubi.plugin.spark.authz.serde.{Database, DB_COMMAND_SPECS}
@@ -688,8 +689,8 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite {
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE)
assert(po.catalog.get === sparkSessionCatalogName)
- assert(po.dbname === "default")
- assert(po.objectName === "default")
+ assert(po.dbname === defaultDb)
+ assert(po.objectName === defaultDb)
assert(po.columns.isEmpty)
}
@@ -732,8 +733,8 @@ abstract class V2CommandsPrivilegesSuite extends PrivilegesBuilderSuite {
assert(po.actionType === PrivilegeObjectActionType.OTHER)
assert(po.privilegeObjectType === PrivilegeObjectType.DATABASE)
assert(po.catalog.get === sparkSessionCatalogName)
- assert(po.dbname === "default")
- assert(po.objectName === "default")
+ assert(po.dbname === defaultDb)
+ assert(po.objectName === defaultDb)
assert(po.columns.isEmpty)
val accessType = ranger.AccessType(po, operationType, isInput = false)
assert(accessType === AccessType.ALTER)
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/IcebergCatalogRangerSparkExtensionSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/IcebergCatalogRangerSparkExtensionSuite.scala
index 6b1cedf786f..ba6992362db 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/IcebergCatalogRangerSparkExtensionSuite.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/IcebergCatalogRangerSparkExtensionSuite.scala
@@ -23,6 +23,8 @@ import org.scalatest.Outcome
import org.apache.kyuubi.Utils
import org.apache.kyuubi.plugin.spark.authz.AccessControlException
+import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._
+import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._
/**
* Tests for RangerSparkExtensionSuite
@@ -36,7 +38,7 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite
else ""
val catalogV2 = "local"
- val namespace1 = "iceberg_ns"
+ val namespace1 = icebergNamespace
val table1 = "table1"
val outputTable1 = "outputTable1"
@@ -57,18 +59,18 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite
super.beforeAll()
- doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace1"))
+ doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace1"))
doAs(
- "admin",
+ admin,
sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$table1" +
" (id int, name string, city string) USING iceberg"))
doAs(
- "admin",
+ admin,
sql(s"INSERT INTO $catalogV2.$namespace1.$table1" +
" (id , name , city ) VALUES (1, 'liangbowen','Guangzhou')"))
doAs(
- "admin",
+ admin,
sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$outputTable1" +
" (id int, name string, city string) USING iceberg"))
}
@@ -93,7 +95,7 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite
// MergeIntoTable: Using a MERGE INTO Statement
val e1 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(mergeIntoSql)))
assert(e1.getMessage.contains(s"does not have [select] privilege" +
s" on [$namespace1/$table1/id]"))
@@ -104,7 +106,7 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite
true)
val e2 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(mergeIntoSql)))
assert(e2.getMessage.contains(s"does not have" +
s" [select] privilege" +
@@ -116,21 +118,21 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite
false)
}
- doAs("admin", sql(mergeIntoSql))
+ doAs(admin, sql(mergeIntoSql))
}
test("[KYUUBI #3515] UPDATE TABLE") {
// UpdateTable
val e1 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"UPDATE $catalogV2.$namespace1.$table1 SET city='Guangzhou' " +
" WHERE id=1")))
assert(e1.getMessage.contains(s"does not have [update] privilege" +
s" on [$namespace1/$table1]"))
doAs(
- "admin",
+ admin,
sql(s"UPDATE $catalogV2.$namespace1.$table1 SET city='Guangzhou' " +
" WHERE id=1"))
}
@@ -138,11 +140,11 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite
test("[KYUUBI #3515] DELETE FROM TABLE") {
// DeleteFromTable
val e6 = intercept[AccessControlException](
- doAs("someone", sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=2")))
+ doAs(someone, sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=2")))
assert(e6.getMessage.contains(s"does not have [update] privilege" +
s" on [$namespace1/$table1]"))
- doAs("admin", sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=2"))
+ doAs(admin, sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=2"))
}
test("[KYUUBI #3666] Support {OWNER} variable for queries run on CatalogV2") {
@@ -163,7 +165,7 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite
}.isSuccess))
doAs(
- "create_only_user", {
+ createOnlyUser, {
val e = intercept[AccessControlException](sql(select).collect())
assert(e.getMessage === errorMessage("select", s"$namespace1/$table/key"))
})
@@ -178,17 +180,17 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite
(s"$catalogV2.default.src", "table"),
(s"$catalogV2.default.outputTable2", "table"))) {
doAs(
- "admin",
+ admin,
sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.default.src" +
" (id int, name string, key string) USING iceberg"))
doAs(
- "admin",
+ admin,
sql(s"INSERT INTO $catalogV2.default.src" +
" (id , name , key ) VALUES " +
"(1, 'liangbowen1','10')" +
", (2, 'liangbowen2','20')"))
doAs(
- "admin",
+ admin,
sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$outputTable2" +
" (id int, name string, key string) USING iceberg"))
@@ -200,20 +202,20 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite
|WHEN NOT MATCHED THEN INSERT (id, name, key) VALUES (source.id, source.name, source.key)
""".stripMargin
- doAs("admin", sql(mergeIntoSql))
+ doAs(admin, sql(mergeIntoSql))
doAs(
- "admin", {
+ admin, {
val countOutputTable =
sql(s"select count(1) from $catalogV2.$namespace1.$outputTable2").collect()
val rowCount = countOutputTable(0).get(0)
assert(rowCount === 2)
})
- doAs("admin", sql(s"truncate table $catalogV2.$namespace1.$outputTable2"))
+ doAs(admin, sql(s"truncate table $catalogV2.$namespace1.$outputTable2"))
// source table with row filter `key`<20
- doAs("bob", sql(mergeIntoSql))
+ doAs(bob, sql(mergeIntoSql))
doAs(
- "admin", {
+ admin, {
val countOutputTable =
sql(s"select count(1) from $catalogV2.$namespace1.$outputTable2").collect()
val rowCount = countOutputTable(0).get(0)
@@ -224,7 +226,7 @@ class IcebergCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite
test("[KYUUBI #4255] DESCRIBE TABLE") {
val e1 = intercept[AccessControlException](
- doAs("someone", sql(s"DESCRIBE TABLE $catalogV2.$namespace1.$table1").explain()))
+ doAs(someone, sql(s"DESCRIBE TABLE $catalogV2.$namespace1.$table1").explain()))
assert(e1.getMessage.contains(s"does not have [select] privilege" +
s" on [$namespace1/$table1]"))
}
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala
index beef36d5dda..6424832eafb 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/RangerSparkExtensionSuite.scala
@@ -31,6 +31,8 @@ import org.scalatest.BeforeAndAfterAll
import org.scalatest.funsuite.AnyFunSuite
import org.apache.kyuubi.plugin.spark.authz.{AccessControlException, SparkSessionProvider}
+import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._
+import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._
import org.apache.kyuubi.plugin.spark.authz.ranger.RuleAuthorization.KYUUBI_AUTHZ_TAG
import org.apache.kyuubi.plugin.spark.authz.util.AuthZUtils.getFieldVal
@@ -88,7 +90,7 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
}
test("[KYUUBI #3226] RuleAuthorization: Should check privileges once only.") {
- val logicalPlan = doAs("admin", sql("SHOW TABLES").queryExecution.logical)
+ val logicalPlan = doAs(admin, sql("SHOW TABLES").queryExecution.logical)
val rule = new RuleAuthorization(spark)
(1 until 10).foreach { i =>
@@ -116,7 +118,7 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
withCleanTmpResources(Seq((testTable, "table"))) {
// create tmp table
doAs(
- "admin", {
+ admin, {
sql(create)
// session1: first query, should auth once.[LogicalRelation]
@@ -155,18 +157,18 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
val e = intercept[AccessControlException](sql(create))
assert(e.getMessage === errorMessage("create", "mydb"))
withCleanTmpResources(Seq((testDb, "database"))) {
- doAs("admin", assert(Try { sql(create) }.isSuccess))
- doAs("admin", assert(Try { sql(alter) }.isSuccess))
+ doAs(admin, assert(Try { sql(create) }.isSuccess))
+ doAs(admin, assert(Try { sql(alter) }.isSuccess))
val e1 = intercept[AccessControlException](sql(alter))
assert(e1.getMessage === errorMessage("alter", "mydb"))
val e2 = intercept[AccessControlException](sql(drop))
assert(e2.getMessage === errorMessage("drop", "mydb"))
- doAs("kent", Try(sql("SHOW DATABASES")).isSuccess)
+ doAs(kent, Try(sql("SHOW DATABASES")).isSuccess)
}
}
test("auth: tables") {
- val db = "default"
+ val db = defaultDb
val table = "src"
val col = "key"
@@ -178,14 +180,14 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
assert(e.getMessage === errorMessage("create"))
withCleanTmpResources(Seq((s"$db.$table", "table"))) {
- doAs("bob", assert(Try { sql(create0) }.isSuccess))
- doAs("bob", assert(Try { sql(alter0) }.isSuccess))
+ doAs(bob, assert(Try { sql(create0) }.isSuccess))
+ doAs(bob, assert(Try { sql(alter0) }.isSuccess))
val e1 = intercept[AccessControlException](sql(drop0))
assert(e1.getMessage === errorMessage("drop"))
- doAs("bob", assert(Try { sql(alter0) }.isSuccess))
- doAs("bob", assert(Try { sql(select).collect() }.isSuccess))
- doAs("kent", assert(Try { sql(s"SELECT key FROM $db.$table").collect() }.isSuccess))
+ doAs(bob, assert(Try { sql(alter0) }.isSuccess))
+ doAs(bob, assert(Try { sql(select).collect() }.isSuccess))
+ doAs(kent, assert(Try { sql(s"SELECT key FROM $db.$table").collect() }.isSuccess))
Seq(
select,
@@ -196,10 +198,10 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
s"SELECT key FROM $db.$table WHERE value in (SELECT value as key FROM $db.$table)")
.foreach { q =>
doAs(
- "kent", {
+ kent, {
withClue(q) {
val e = intercept[AccessControlException](sql(q).collect())
- assert(e.getMessage === errorMessage("select", "default/src/value", "kent"))
+ assert(e.getMessage === errorMessage("select", "default/src/value", kent))
}
})
}
@@ -207,15 +209,15 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
}
test("auth: functions") {
- val db = "default"
+ val db = defaultDb
val func = "func"
val create0 = s"CREATE FUNCTION IF NOT EXISTS $db.$func AS 'abc.mnl.xyz'"
doAs(
- "kent", {
+ kent, {
val e = intercept[AccessControlException](sql(create0))
assert(e.getMessage === errorMessage("create", "default/func"))
})
- doAs("admin", assert(Try(sql(create0)).isSuccess))
+ doAs(admin, assert(Try(sql(create0)).isSuccess))
}
test("show tables") {
@@ -226,14 +228,14 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
(s"$db.$table", "table"),
(s"$db.${table}for_show", "table"),
(s"$db", "database"))) {
- doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $db"))
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.$table (key int) USING $format"))
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}for_show (key int) USING $format"))
-
- doAs("admin", assert(sql(s"show tables from $db").collect().length === 2))
- doAs("bob", assert(sql(s"show tables from $db").collect().length === 0))
- doAs("i_am_invisible", assert(sql(s"show tables from $db").collect().length === 0))
- doAs("i_am_invisible", assert(sql(s"show tables from $db").limit(1).isEmpty))
+ doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $db"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.$table (key int) USING $format"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}for_show (key int) USING $format"))
+
+ doAs(admin, assert(sql(s"show tables from $db").collect().length === 2))
+ doAs(bob, assert(sql(s"show tables from $db").collect().length === 0))
+ doAs(invisibleUser, assert(sql(s"show tables from $db").collect().length === 0))
+ doAs(invisibleUser, assert(sql(s"show tables from $db").limit(1).isEmpty))
}
}
@@ -241,19 +243,19 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
val db = "default2"
withCleanTmpResources(Seq((db, "database"))) {
- doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $db"))
- doAs("admin", assert(sql(s"SHOW DATABASES").collect().length == 2))
- doAs("admin", assert(sql(s"SHOW DATABASES").collectAsList().get(0).getString(0) == "default"))
- doAs("admin", assert(sql(s"SHOW DATABASES").collectAsList().get(1).getString(0) == s"$db"))
-
- doAs("bob", assert(sql(s"SHOW DATABASES").collect().length == 1))
- doAs("bob", assert(sql(s"SHOW DATABASES").collectAsList().get(0).getString(0) == "default"))
- doAs("i_am_invisible", assert(sql(s"SHOW DATABASES").limit(1).isEmpty))
+ doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $db"))
+ doAs(admin, assert(sql(s"SHOW DATABASES").collect().length == 2))
+ doAs(admin, assert(sql(s"SHOW DATABASES").collectAsList().get(0).getString(0) == defaultDb))
+ doAs(admin, assert(sql(s"SHOW DATABASES").collectAsList().get(1).getString(0) == s"$db"))
+
+ doAs(bob, assert(sql(s"SHOW DATABASES").collect().length == 1))
+ doAs(bob, assert(sql(s"SHOW DATABASES").collectAsList().get(0).getString(0) == defaultDb))
+ doAs(invisibleUser, assert(sql(s"SHOW DATABASES").limit(1).isEmpty))
}
}
test("show functions") {
- val default = "default"
+ val default = defaultDb
val db3 = "default3"
val function1 = "function1"
@@ -261,41 +263,41 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
(s"$default.$function1", "function"),
(s"$db3.$function1", "function"),
(db3, "database"))) {
- doAs("admin", sql(s"CREATE FUNCTION $function1 AS 'Function1'"))
- doAs("admin", assert(sql(s"show user functions $default.$function1").collect().length == 1))
- doAs("bob", assert(sql(s"show user functions $default.$function1").collect().length == 0))
+ doAs(admin, sql(s"CREATE FUNCTION $function1 AS 'Function1'"))
+ doAs(admin, assert(sql(s"show user functions $default.$function1").collect().length == 1))
+ doAs(bob, assert(sql(s"show user functions $default.$function1").collect().length == 0))
- doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $db3"))
- doAs("admin", sql(s"CREATE FUNCTION $db3.$function1 AS 'Function1'"))
+ doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $db3"))
+ doAs(admin, sql(s"CREATE FUNCTION $db3.$function1 AS 'Function1'"))
- doAs("admin", assert(sql(s"show user functions $db3.$function1").collect().length == 1))
- doAs("bob", assert(sql(s"show user functions $db3.$function1").collect().length == 0))
+ doAs(admin, assert(sql(s"show user functions $db3.$function1").collect().length == 1))
+ doAs(bob, assert(sql(s"show user functions $db3.$function1").collect().length == 0))
- doAs("admin", assert(sql(s"show system functions").collect().length > 0))
- doAs("bob", assert(sql(s"show system functions").collect().length > 0))
+ doAs(admin, assert(sql(s"show system functions").collect().length > 0))
+ doAs(bob, assert(sql(s"show system functions").collect().length > 0))
- val adminSystemFunctionCount = doAs("admin", sql(s"show system functions").collect().length)
- val bobSystemFunctionCount = doAs("bob", sql(s"show system functions").collect().length)
+ val adminSystemFunctionCount = doAs(admin, sql(s"show system functions").collect().length)
+ val bobSystemFunctionCount = doAs(bob, sql(s"show system functions").collect().length)
assert(adminSystemFunctionCount == bobSystemFunctionCount)
}
}
test("show columns") {
- val db = "default"
+ val db = defaultDb
val table = "src"
val col = "key"
val create = s"CREATE TABLE IF NOT EXISTS $db.$table ($col int, value int) USING $format"
withCleanTmpResources(Seq((s"$db.$table", "table"))) {
- doAs("admin", sql(create))
+ doAs(admin, sql(create))
- doAs("admin", assert(sql(s"SHOW COLUMNS IN $table").count() == 2))
- doAs("admin", assert(sql(s"SHOW COLUMNS IN $db.$table").count() == 2))
- doAs("admin", assert(sql(s"SHOW COLUMNS IN $table IN $db").count() == 2))
+ doAs(admin, assert(sql(s"SHOW COLUMNS IN $table").count() == 2))
+ doAs(admin, assert(sql(s"SHOW COLUMNS IN $db.$table").count() == 2))
+ doAs(admin, assert(sql(s"SHOW COLUMNS IN $table IN $db").count() == 2))
- doAs("kent", assert(sql(s"SHOW COLUMNS IN $table").count() == 1))
- doAs("kent", assert(sql(s"SHOW COLUMNS IN $db.$table").count() == 1))
- doAs("kent", assert(sql(s"SHOW COLUMNS IN $table IN $db").count() == 1))
+ doAs(kent, assert(sql(s"SHOW COLUMNS IN $table").count() == 1))
+ doAs(kent, assert(sql(s"SHOW COLUMNS IN $db.$table").count() == 1))
+ doAs(kent, assert(sql(s"SHOW COLUMNS IN $table IN $db").count() == 1))
}
}
@@ -310,24 +312,24 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
(s"$db.${table}_select2", "table"),
(s"$db.${table}_select3", "table"),
(s"$db", "database"))) {
- doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $db"))
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_use1 (key int) USING $format"))
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_use2 (key int) USING $format"))
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select1 (key int) USING $format"))
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select2 (key int) USING $format"))
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select3 (key int) USING $format"))
+ doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $db"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_use1 (key int) USING $format"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_use2 (key int) USING $format"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select1 (key int) USING $format"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select2 (key int) USING $format"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.${table}_select3 (key int) USING $format"))
doAs(
- "admin",
+ admin,
assert(sql(s"show table extended from $db like '$table*'").collect().length === 5))
doAs(
- "bob",
+ bob,
assert(sql(s"show tables from $db").collect().length === 5))
doAs(
- "bob",
+ bob,
assert(sql(s"show table extended from $db like '$table*'").collect().length === 3))
doAs(
- "i_am_invisible",
+ invisibleUser,
assert(sql(s"show table extended from $db like '$table*'").collect().length === 0))
}
}
@@ -339,48 +341,48 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
val globalTempView2 = "global_temp_view2"
// create or replace view
- doAs("denyuser", sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)"))
+ doAs(denyUser, sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)"))
doAs(
- "denyuser",
+ denyUser,
sql(s"CREATE GLOBAL TEMPORARY VIEW $globalTempView AS SELECT * FROM values(1)"))
// rename view
- doAs("denyuser2", sql(s"ALTER VIEW $tempView RENAME TO $tempView2"))
+ doAs(denyUser2, sql(s"ALTER VIEW $tempView RENAME TO $tempView2"))
doAs(
- "denyuser2",
+ denyUser2,
sql(s"ALTER VIEW global_temp.$globalTempView RENAME TO global_temp.$globalTempView2"))
- doAs("admin", sql(s"DROP VIEW IF EXISTS $tempView2"))
- doAs("admin", sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView2"))
- doAs("admin", assert(sql("show tables from global_temp").collect().length == 0))
+ doAs(admin, sql(s"DROP VIEW IF EXISTS $tempView2"))
+ doAs(admin, sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView2"))
+ doAs(admin, assert(sql("show tables from global_temp").collect().length == 0))
}
test("[KYUUBI #3426] Drop temp view should be skipped permission check") {
val tempView = "temp_view"
val globalTempView = "global_temp_view"
- doAs("denyuser", sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)"))
+ doAs(denyUser, sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)"))
doAs(
- "denyuser",
+ denyUser,
sql(s"CREATE OR REPLACE TEMPORARY VIEW $tempView" +
s" AS select * from values(1)"))
doAs(
- "denyuser",
+ denyUser,
sql(s"CREATE GLOBAL TEMPORARY VIEW $globalTempView AS SELECT * FROM values(1)"))
doAs(
- "denyuser",
+ denyUser,
sql(s"CREATE OR REPLACE GLOBAL TEMPORARY VIEW $globalTempView" +
s" AS select * from values(1)"))
// global_temp will contain the temporary view, even if it is not global
- doAs("admin", assert(sql("show tables from global_temp").collect().length == 2))
+ doAs(admin, assert(sql("show tables from global_temp").collect().length == 2))
- doAs("denyuser2", sql(s"DROP VIEW IF EXISTS $tempView"))
- doAs("denyuser2", sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView"))
+ doAs(denyUser2, sql(s"DROP VIEW IF EXISTS $tempView"))
+ doAs(denyUser2, sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView"))
- doAs("admin", assert(sql("show tables from global_temp").collect().length == 0))
+ doAs(admin, assert(sql("show tables from global_temp").collect().length == 0))
}
test("[KYUUBI #3428] AlterViewAsCommand should be skipped permission check") {
@@ -388,26 +390,26 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
val globalTempView = "global_temp_view"
// create or replace view
- doAs("denyuser", sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)"))
+ doAs(denyUser, sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)"))
doAs(
- "denyuser",
+ denyUser,
sql(s"CREATE OR REPLACE TEMPORARY VIEW $tempView" +
s" AS select * from values(1)"))
doAs(
- "denyuser",
+ denyUser,
sql(s"CREATE GLOBAL TEMPORARY VIEW $globalTempView AS SELECT * FROM values(1)"))
doAs(
- "denyuser",
+ denyUser,
sql(s"CREATE OR REPLACE GLOBAL TEMPORARY VIEW $globalTempView" +
s" AS select * from values(1)"))
// rename view
- doAs("denyuser2", sql(s"ALTER VIEW $tempView AS SELECT * FROM values(1)"))
- doAs("denyuser2", sql(s"ALTER VIEW global_temp.$globalTempView AS SELECT * FROM values(1)"))
+ doAs(denyUser2, sql(s"ALTER VIEW $tempView AS SELECT * FROM values(1)"))
+ doAs(denyUser2, sql(s"ALTER VIEW global_temp.$globalTempView AS SELECT * FROM values(1)"))
- doAs("admin", sql(s"DROP VIEW IF EXISTS $tempView"))
- doAs("admin", sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView"))
- doAs("admin", assert(sql("show tables from global_temp").collect().length == 0))
+ doAs(admin, sql(s"DROP VIEW IF EXISTS $tempView"))
+ doAs(admin, sql(s"DROP VIEW IF EXISTS global_temp.$globalTempView"))
+ doAs(admin, assert(sql("show tables from global_temp").collect().length == 0))
}
test("[KYUUBI #3343] pass temporary view creation") {
@@ -416,28 +418,28 @@ abstract class RangerSparkExtensionSuite extends AnyFunSuite
withTempView(tempView) {
doAs(
- "denyuser",
+ denyUser,
assert(Try(sql(s"CREATE TEMPORARY VIEW $tempView AS select * from values(1)")).isSuccess))
doAs(
- "denyuser",
+ denyUser,
Try(sql(s"CREATE OR REPLACE TEMPORARY VIEW $tempView" +
s" AS select * from values(1)")).isSuccess)
}
withGlobalTempView(globalTempView) {
doAs(
- "denyuser",
+ denyUser,
Try(
sql(
s"CREATE GLOBAL TEMPORARY VIEW $globalTempView AS SELECT * FROM values(1)")).isSuccess)
doAs(
- "denyuser",
+ denyUser,
Try(sql(s"CREATE OR REPLACE GLOBAL TEMPORARY VIEW $globalTempView" +
s" AS select * from values(1)")).isSuccess)
}
- doAs("admin", assert(sql("show tables from global_temp").collect().length == 0))
+ doAs(admin, assert(sql("show tables from global_temp").collect().length == 0))
}
}
@@ -450,9 +452,9 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
test("table stats must be specified") {
val table = "hive_src"
withCleanTmpResources(Seq((table, "table"))) {
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $table (id int)"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $table (id int)"))
doAs(
- "admin", {
+ admin, {
val hiveTableRelation = sql(s"SELECT * FROM $table")
.queryExecution.optimizedPlan.collectLeaves().head.asInstanceOf[HiveTableRelation]
assert(getFieldVal[Option[Statistics]](hiveTableRelation, "tableStats").nonEmpty)
@@ -463,9 +465,9 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
test("HiveTableRelation should be able to be converted to LogicalRelation") {
val table = "hive_src"
withCleanTmpResources(Seq((table, "table"))) {
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $table (id int) STORED AS PARQUET"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $table (id int) STORED AS PARQUET"))
doAs(
- "admin", {
+ admin, {
val relation = sql(s"SELECT * FROM $table")
.queryExecution.optimizedPlan.collectLeaves().head
assert(relation.isInstanceOf[LogicalRelation])
@@ -483,7 +485,7 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
(s"$db.$table1", "table"),
(s"$db", "database"))) {
doAs(
- "admin", {
+ admin, {
sql(s"CREATE DATABASE IF NOT EXISTS $db")
sql(s"CREATE TABLE IF NOT EXISTS $db.$table1(id int) STORED AS PARQUET")
sql(s"INSERT INTO $db.$table1 SELECT 1")
@@ -504,16 +506,16 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
(adminPermView, "view"),
(permView, "view"),
(table, "table"))) {
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $table (id int)"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $table (id int)"))
- doAs("admin", sql(s"CREATE VIEW ${adminPermView} AS SELECT * FROM $table"))
+ doAs(admin, sql(s"CREATE VIEW ${adminPermView} AS SELECT * FROM $table"))
val e1 = intercept[AccessControlException](
- doAs("someone", sql(s"CREATE VIEW $permView AS SELECT 1 as a")))
+ doAs(someone, sql(s"CREATE VIEW $permView AS SELECT 1 as a")))
assert(e1.getMessage.contains(s"does not have [create] privilege on [default/$permView]"))
val e2 = intercept[AccessControlException](
- doAs("someone", sql(s"CREATE VIEW $permView AS SELECT * FROM $table")))
+ doAs(someone, sql(s"CREATE VIEW $permView AS SELECT * FROM $table")))
if (isSparkV32OrGreater) {
assert(e2.getMessage.contains(s"does not have [select] privilege on [default/$table/id]"))
} else {
@@ -523,20 +525,20 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
}
test("[KYUUBI #3326] check persisted view and skip shadowed table") {
- val db1 = "default"
+ val db1 = defaultDb
val table = "hive_src"
val permView = "perm_view"
withCleanTmpResources(Seq(
(s"$db1.$table", "table"),
(s"$db1.$permView", "view"))) {
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)"))
- doAs("admin", sql(s"CREATE VIEW $db1.$permView AS SELECT * FROM $db1.$table"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)"))
+ doAs(admin, sql(s"CREATE VIEW $db1.$permView AS SELECT * FROM $db1.$table"))
// KYUUBI #3326: with no privileges to the permanent view or the source table
val e1 = intercept[AccessControlException](
doAs(
- "someone", {
+ someone, {
sql(s"select * from $db1.$permView").collect()
}))
if (isSparkV31OrGreater) {
@@ -548,16 +550,16 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
}
test("KYUUBI #4504: query permanent view with privilege to permanent view only") {
- val db1 = "default"
+ val db1 = defaultDb
val table = "hive_src"
val permView = "perm_view"
- val userPermViewOnly = "user_perm_view_only"
+ val userPermViewOnly = permViewOnlyUser
withCleanTmpResources(Seq(
(s"$db1.$table", "table"),
(s"$db1.$permView", "view"))) {
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)"))
- doAs("admin", sql(s"CREATE VIEW $db1.$permView AS SELECT * FROM $db1.$table"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)"))
+ doAs(admin, sql(s"CREATE VIEW $db1.$permView AS SELECT * FROM $db1.$table"))
// query all columns of the permanent view
// with access privileges to the permanent view but no privilege to the source table
@@ -582,7 +584,7 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
}
test("[KYUUBI #3371] support throws all disallowed privileges in exception") {
- val db1 = "default"
+ val db1 = defaultDb
val srcTable1 = "hive_src1"
val srcTable2 = "hive_src2"
val sinkTable1 = "hive_sink1"
@@ -592,17 +594,17 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
(s"$db1.$srcTable2", "table"),
(s"$db1.$sinkTable1", "table"))) {
doAs(
- "admin",
+ admin,
sql(s"CREATE TABLE IF NOT EXISTS $db1.$srcTable1" +
s" (id int, name string, city string)"))
doAs(
- "admin",
+ admin,
sql(s"CREATE TABLE IF NOT EXISTS $db1.$srcTable2" +
s" (id int, age int)"))
doAs(
- "admin",
+ admin,
sql(s"CREATE TABLE IF NOT EXISTS $db1.$sinkTable1" +
s" (id int, age int, name string, city string)"))
@@ -611,14 +613,14 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
s" FROM $db1.$srcTable1 as tb1" +
s" JOIN $db1.$srcTable2 as tb2" +
s" on tb1.id = tb2.id"
- val e1 = intercept[AccessControlException](doAs("someone", sql(insertSql1)))
+ val e1 = intercept[AccessControlException](doAs(someone, sql(insertSql1)))
assert(e1.getMessage.contains(s"does not have [select] privilege on [$db1/$srcTable1/id]"))
try {
SparkRangerAdminPlugin.getRangerConf.setBoolean(
s"ranger.plugin.${SparkRangerAdminPlugin.getServiceType}.authorize.in.single.call",
true)
- val e2 = intercept[AccessControlException](doAs("someone", sql(insertSql1)))
+ val e2 = intercept[AccessControlException](doAs(someone, sql(insertSql1)))
assert(e2.getMessage.contains(s"does not have" +
s" [select] privilege on" +
s" [$db1/$srcTable1/id,$db1/$srcTable1/name,$db1/$srcTable1/city," +
@@ -637,7 +639,7 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
test("[KYUUBI #3411] skip checking cache table") {
if (isSparkV32OrGreater) { // cache table sql supported since 3.2.0
- val db1 = "default"
+ val db1 = defaultDb
val srcTable1 = "hive_src1"
val cacheTable1 = "cacheTable1"
val cacheTable2 = "cacheTable2"
@@ -652,23 +654,23 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
(s"$db1.$cacheTable4", "cache"))) {
doAs(
- "admin",
+ admin,
sql(s"CREATE TABLE IF NOT EXISTS $db1.$srcTable1" +
s" (id int, name string, city string)"))
val e1 = intercept[AccessControlException](
- doAs("someone", sql(s"CACHE TABLE $cacheTable2 select * from $db1.$srcTable1")))
+ doAs(someone, sql(s"CACHE TABLE $cacheTable2 select * from $db1.$srcTable1")))
assert(
e1.getMessage.contains(s"does not have [select] privilege on [$db1/$srcTable1/id]"))
- doAs("admin", sql(s"CACHE TABLE $cacheTable3 SELECT 1 AS a, 2 AS b "))
- doAs("someone", sql(s"CACHE TABLE $cacheTable4 select 1 as a, 2 as b "))
+ doAs(admin, sql(s"CACHE TABLE $cacheTable3 SELECT 1 AS a, 2 AS b "))
+ doAs(someone, sql(s"CACHE TABLE $cacheTable4 select 1 as a, 2 as b "))
}
}
}
test("[KYUUBI #3608] Support {OWNER} variable for queries") {
- val db = "default"
+ val db = defaultDb
val table = "owner_variable"
val select = s"SELECT key FROM $db.$table"
@@ -687,7 +689,7 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
}.isSuccess))
doAs(
- "create_only_user", {
+ createOnlyUser, {
val e = intercept[AccessControlException](sql(select).collect())
assert(e.getMessage === errorMessage("select", s"$db/$table/key"))
})
@@ -701,22 +703,22 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
Seq(
(s"$db.$table", "table"),
(s"$db", "database"))) {
- doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $db"))
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db.$table (key int) USING $format"))
+ doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $db"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db.$table (key int) USING $format"))
sql("SHOW DATABASES").queryExecution.optimizedPlan.stats
sql(s"SHOW TABLES IN $db").queryExecution.optimizedPlan.stats
}
}
test("[KYUUBI #4658] insert overwrite hive directory") {
- val db1 = "default"
+ val db1 = defaultDb
val table = "src"
withCleanTmpResources(Seq((s"$db1.$table", "table"))) {
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)"))
val e = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(
s"""INSERT OVERWRITE DIRECTORY '/tmp/test_dir' ROW FORMAT DELIMITED FIELDS
| TERMINATED BY ','
@@ -726,14 +728,14 @@ class HiveCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSuite {
}
test("[KYUUBI #4658] insert overwrite datasource directory") {
- val db1 = "default"
+ val db1 = defaultDb
val table = "src"
withCleanTmpResources(Seq((s"$db1.$table", "table"))) {
- doAs("admin", sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)"))
+ doAs(admin, sql(s"CREATE TABLE IF NOT EXISTS $db1.$table (id int, name string)"))
val e = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(
s"""INSERT OVERWRITE DIRECTORY '/tmp/test_dir'
| USING parquet
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala
index 3338a331450..301ae87c553 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/SparkRangerAdminPluginSuite.scala
@@ -22,6 +22,8 @@ import org.apache.hadoop.security.UserGroupInformation
import org.scalatest.funsuite.AnyFunSuite
import org.apache.kyuubi.plugin.spark.authz.{ObjectType, OperationType}
+import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._
+import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._
import org.apache.kyuubi.plugin.spark.authz.ranger.SparkRangerAdminPlugin._
class SparkRangerAdminPluginSuite extends AnyFunSuite {
@@ -29,13 +31,13 @@ class SparkRangerAdminPluginSuite extends AnyFunSuite {
test("get filter expression") {
val bob = UserGroupInformation.createRemoteUser("bob")
- val are = AccessResource(ObjectType.TABLE, "default", "src", null)
+ val are = AccessResource(ObjectType.TABLE, defaultDb, "src", null)
def buildAccessRequest(ugi: UserGroupInformation): AccessRequest = {
AccessRequest(are, ugi, OperationType.QUERY, AccessType.SELECT)
}
val maybeString = getFilterExpr(buildAccessRequest(bob))
assert(maybeString.get === "key<20")
- Seq("admin", "alice").foreach { user =>
+ Seq(admin, alice).foreach { user =>
val ugi = UserGroupInformation.createRemoteUser(user)
val maybeString = getFilterExpr(buildAccessRequest(ugi))
assert(maybeString.isEmpty)
@@ -45,7 +47,7 @@ class SparkRangerAdminPluginSuite extends AnyFunSuite {
test("get data masker") {
val bob = UserGroupInformation.createRemoteUser("bob")
def buildAccessRequest(ugi: UserGroupInformation, column: String): AccessRequest = {
- val are = AccessResource(ObjectType.COLUMN, "default", "src", column)
+ val are = AccessResource(ObjectType.COLUMN, defaultDb, "src", column)
AccessRequest(are, ugi, OperationType.QUERY, AccessType.SELECT)
}
assert(getMaskingExpr(buildAccessRequest(bob, "value1")).get === "md5(cast(value1 as string))")
@@ -59,7 +61,7 @@ class SparkRangerAdminPluginSuite extends AnyFunSuite {
"left(value5, length(value5) - 4), '[A-Z]', 'X'), '[a-z]', 'x')," +
" '[0-9]', 'n'), '[^A-Za-z0-9]', 'U'), right(value5, 4))")
- Seq("admin", "alice").foreach { user =>
+ Seq(admin, alice).foreach { user =>
val ugi = UserGroupInformation.createRemoteUser(user)
val maybeString = getMaskingExpr(buildAccessRequest(ugi, "value1"))
assert(maybeString.isEmpty)
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/V2JdbcTableCatalogRangerSparkExtensionSuite.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/V2JdbcTableCatalogRangerSparkExtensionSuite.scala
index 73a13bc1c3c..07fe0ae5a68 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/V2JdbcTableCatalogRangerSparkExtensionSuite.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/V2JdbcTableCatalogRangerSparkExtensionSuite.scala
@@ -22,6 +22,8 @@ import scala.util.Try
// scalastyle:off
import org.apache.kyuubi.plugin.spark.authz.AccessControlException
+import org.apache.kyuubi.plugin.spark.authz.RangerTestNamespace._
+import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._
/**
* Tests for RangerSparkExtensionSuite
@@ -32,8 +34,6 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
val catalogV2 = "testcat"
val jdbcCatalogV2 = "jdbc2"
- val namespace1 = "ns1"
- val namespace2 = "ns2"
val table1 = "table1"
val table2 = "table2"
val outputTable1 = "outputTable1"
@@ -54,13 +54,13 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
super.beforeAll()
- doAs("admin", sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace1"))
+ doAs(admin, sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace1"))
doAs(
- "admin",
+ admin,
sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$table1" +
" (id int, name string, city string)"))
doAs(
- "admin",
+ admin,
sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$outputTable1" +
" (id int, name string, city string)"))
}
@@ -82,7 +82,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// create database
val e1 = intercept[AccessControlException](
- doAs("someone", sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace2").explain()))
+ doAs(someone, sql(s"CREATE DATABASE IF NOT EXISTS $catalogV2.$namespace2").explain()))
assert(e1.getMessage.contains(s"does not have [create] privilege" +
s" on [$namespace2]"))
}
@@ -92,7 +92,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// create database
val e1 = intercept[AccessControlException](
- doAs("someone", sql(s"DROP DATABASE IF EXISTS $catalogV2.$namespace2").explain()))
+ doAs(someone, sql(s"DROP DATABASE IF EXISTS $catalogV2.$namespace2").explain()))
assert(e1.getMessage.contains(s"does not have [drop] privilege" +
s" on [$namespace2]"))
}
@@ -102,7 +102,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// select
val e1 = intercept[AccessControlException](
- doAs("someone", sql(s"select city, id from $catalogV2.$namespace1.$table1").explain()))
+ doAs(someone, sql(s"select city, id from $catalogV2.$namespace1.$table1").explain()))
assert(e1.getMessage.contains(s"does not have [select] privilege" +
s" on [$namespace1/$table1/city]"))
}
@@ -110,7 +110,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
test("[KYUUBI #4255] DESCRIBE TABLE") {
assume(isSparkV31OrGreater)
val e1 = intercept[AccessControlException](
- doAs("someone", sql(s"DESCRIBE TABLE $catalogV2.$namespace1.$table1").explain()))
+ doAs(someone, sql(s"DESCRIBE TABLE $catalogV2.$namespace1.$table1").explain()))
assert(e1.getMessage.contains(s"does not have [select] privilege" +
s" on [$namespace1/$table1]"))
}
@@ -120,14 +120,14 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// CreateTable
val e2 = intercept[AccessControlException](
- doAs("someone", sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$table2")))
+ doAs(someone, sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$table2")))
assert(e2.getMessage.contains(s"does not have [create] privilege" +
s" on [$namespace1/$table2]"))
// CreateTableAsSelect
val e21 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"CREATE TABLE IF NOT EXISTS $catalogV2.$namespace1.$table2" +
s" AS select * from $catalogV2.$namespace1.$table1")))
assert(e21.getMessage.contains(s"does not have [select] privilege" +
@@ -139,7 +139,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// DropTable
val e3 = intercept[AccessControlException](
- doAs("someone", sql(s"DROP TABLE $catalogV2.$namespace1.$table1")))
+ doAs(someone, sql(s"DROP TABLE $catalogV2.$namespace1.$table1")))
assert(e3.getMessage.contains(s"does not have [drop] privilege" +
s" on [$namespace1/$table1]"))
}
@@ -150,7 +150,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// AppendData: Insert Using a VALUES Clause
val e4 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"INSERT INTO $catalogV2.$namespace1.$outputTable1 (id, name, city)" +
s" VALUES (1, 'bowenliang123', 'Guangzhou')")))
assert(e4.getMessage.contains(s"does not have [update] privilege" +
@@ -159,7 +159,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// AppendData: Insert Using a TABLE Statement
val e42 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"INSERT INTO $catalogV2.$namespace1.$outputTable1 (id, name, city)" +
s" TABLE $catalogV2.$namespace1.$table1")))
assert(e42.getMessage.contains(s"does not have [select] privilege" +
@@ -168,7 +168,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// AppendData: Insert Using a SELECT Statement
val e43 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"INSERT INTO $catalogV2.$namespace1.$outputTable1 (id, name, city)" +
s" SELECT * from $catalogV2.$namespace1.$table1")))
assert(e43.getMessage.contains(s"does not have [select] privilege" +
@@ -177,7 +177,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// OverwriteByExpression: Insert Overwrite
val e44 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"INSERT OVERWRITE $catalogV2.$namespace1.$outputTable1 (id, name, city)" +
s" VALUES (1, 'bowenliang123', 'Guangzhou')")))
assert(e44.getMessage.contains(s"does not have [update] privilege" +
@@ -199,7 +199,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// MergeIntoTable: Using a MERGE INTO Statement
val e1 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(mergeIntoSql)))
assert(e1.getMessage.contains(s"does not have [select] privilege" +
s" on [$namespace1/$table1/id]"))
@@ -210,7 +210,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
true)
val e2 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(mergeIntoSql)))
assert(e2.getMessage.contains(s"does not have" +
s" [select] privilege" +
@@ -229,7 +229,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// UpdateTable
val e5 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"UPDATE $catalogV2.$namespace1.$table1 SET city='Hangzhou' " +
" WHERE id=1")))
assert(e5.getMessage.contains(s"does not have [update] privilege" +
@@ -241,7 +241,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// DeleteFromTable
val e6 = intercept[AccessControlException](
- doAs("someone", sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=1")))
+ doAs(someone, sql(s"DELETE FROM $catalogV2.$namespace1.$table1 WHERE id=1")))
assert(e6.getMessage.contains(s"does not have [update] privilege" +
s" on [$namespace1/$table1]"))
}
@@ -252,7 +252,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// CacheTable
val e7 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"CACHE TABLE $cacheTable1" +
s" AS select * from $catalogV2.$namespace1.$table1")))
if (isSparkV32OrGreater) {
@@ -269,7 +269,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
val e1 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"TRUNCATE TABLE $catalogV2.$namespace1.$table1")))
assert(e1.getMessage.contains(s"does not have [update] privilege" +
s" on [$namespace1/$table1]"))
@@ -280,7 +280,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
val e1 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"MSCK REPAIR TABLE $catalogV2.$namespace1.$table1")))
assert(e1.getMessage.contains(s"does not have [alter] privilege" +
s" on [$namespace1/$table1]"))
@@ -292,7 +292,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// AddColumns
val e61 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"ALTER TABLE $catalogV2.$namespace1.$table1 ADD COLUMNS (age int) ").explain()))
assert(e61.getMessage.contains(s"does not have [alter] privilege" +
s" on [$namespace1/$table1]"))
@@ -300,7 +300,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// DropColumns
val e62 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"ALTER TABLE $catalogV2.$namespace1.$table1 DROP COLUMNS city ").explain()))
assert(e62.getMessage.contains(s"does not have [alter] privilege" +
s" on [$namespace1/$table1]"))
@@ -308,7 +308,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// RenameColumn
val e63 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"ALTER TABLE $catalogV2.$namespace1.$table1 RENAME COLUMN city TO city2 ").explain()))
assert(e63.getMessage.contains(s"does not have [alter] privilege" +
s" on [$namespace1/$table1]"))
@@ -316,7 +316,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// AlterColumn
val e64 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"ALTER TABLE $catalogV2.$namespace1.$table1 " +
s"ALTER COLUMN city COMMENT 'city' ")))
assert(e64.getMessage.contains(s"does not have [alter] privilege" +
@@ -329,7 +329,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// CommentOnNamespace
val e1 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"COMMENT ON DATABASE $catalogV2.$namespace1 IS 'xYz' ").explain()))
assert(e1.getMessage.contains(s"does not have [alter] privilege" +
s" on [$namespace1]"))
@@ -337,7 +337,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// CommentOnNamespace
val e2 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"COMMENT ON NAMESPACE $catalogV2.$namespace1 IS 'xYz' ").explain()))
assert(e2.getMessage.contains(s"does not have [alter] privilege" +
s" on [$namespace1]"))
@@ -345,7 +345,7 @@ class V2JdbcTableCatalogRangerSparkExtensionSuite extends RangerSparkExtensionSu
// CommentOnTable
val e3 = intercept[AccessControlException](
doAs(
- "someone",
+ someone,
sql(s"COMMENT ON TABLE $catalogV2.$namespace1.$table1 IS 'xYz' ").explain()))
assert(e3.getMessage.contains(s"does not have [alter] privilege" +
s" on [$namespace1/$table1]"))
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala
index 29a70931152..bae269e7aa9 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/datamasking/DataMaskingTestBase.scala
@@ -17,16 +17,17 @@
package org.apache.kyuubi.plugin.spark.authz.ranger.datamasking
-// scalastyle:off
import java.sql.Timestamp
import scala.util.Try
+// scalastyle:off
import org.apache.commons.codec.digest.DigestUtils.md5Hex
import org.apache.spark.sql.{Row, SparkSessionExtensions}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.funsuite.AnyFunSuite
+import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._
import org.apache.kyuubi.plugin.spark.authz.SparkSessionProvider
import org.apache.kyuubi.plugin.spark.authz.ranger.RangerSparkExtension
@@ -75,18 +76,18 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
}
override def beforeAll(): Unit = {
- doAs("admin", setup())
+ doAs(admin, setup())
super.beforeAll()
}
override def afterAll(): Unit = {
- doAs("admin", cleanup())
+ doAs(admin, cleanup())
spark.stop
super.afterAll()
}
test("simple query with a user doesn't have mask rules") {
checkAnswer(
- "kent",
+ kent,
"SELECT key FROM default.src order by key",
Seq(Row(1), Row(10), Row(11), Row(20), Row(30)))
}
@@ -95,12 +96,12 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
val result =
Seq(Row(md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld"))
checkAnswer(
- "bob",
+ bob,
"SELECT value1, value2, value3, value4, value5 FROM default.src " +
"where key = 1",
result)
checkAnswer(
- "bob",
+ bob,
"SELECT value1 as key, value2, value3, value4, value5 FROM default.src where key = 1",
result)
}
@@ -108,14 +109,14 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
test("star") {
val result =
Seq(Row(1, md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld"))
- checkAnswer("bob", "SELECT * FROM default.src where key = 1", result)
+ checkAnswer(bob, "SELECT * FROM default.src where key = 1", result)
}
test("simple udf") {
val result =
Seq(Row(md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld"))
checkAnswer(
- "bob",
+ bob,
"SELECT max(value1), max(value2), max(value3), max(value4), max(value5) FROM default.src" +
" where key = 1",
result)
@@ -125,7 +126,7 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
val result =
Seq(Row(md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld"))
checkAnswer(
- "bob",
+ bob,
"SELECT coalesce(max(value1), 1), coalesce(max(value2), 1), coalesce(max(value3), 1), " +
"coalesce(max(value4), timestamp '2018-01-01 22:33:44'), coalesce(max(value5), 1) " +
"FROM default.src where key = 1",
@@ -136,7 +137,7 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
val result =
Seq(Row(md5Hex("1"), "xxxxx", "worlx", Timestamp.valueOf("2018-01-01 00:00:00"), "Xorld"))
checkAnswer(
- "bob",
+ bob,
"SELECT value1, value2, value3, value4, value5 FROM default.src WHERE value2 in " +
"(SELECT value2 as key FROM default.src where key = 1)",
result)
@@ -145,59 +146,59 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
test("create a unmasked table as select from a masked one") {
withCleanTmpResources(Seq(("default.src2", "table"))) {
doAs(
- "bob",
+ bob,
sql(s"CREATE TABLE default.src2 $format AS SELECT value1 FROM default.src " +
s"where key = 1"))
- checkAnswer("bob", "SELECT value1 FROM default.src2", Seq(Row(md5Hex("1"))))
+ checkAnswer(bob, "SELECT value1 FROM default.src2", Seq(Row(md5Hex("1"))))
}
}
test("insert into a unmasked table from a masked one") {
withCleanTmpResources(Seq(("default.src2", "table"), ("default.src3", "table"))) {
- doAs("bob", sql(s"CREATE TABLE default.src2 (value1 string) $format"))
+ doAs(bob, sql(s"CREATE TABLE default.src2 (value1 string) $format"))
doAs(
- "bob",
+ bob,
sql(s"INSERT INTO default.src2 SELECT value1 from default.src " +
s"where key = 1"))
doAs(
- "bob",
+ bob,
sql(s"INSERT INTO default.src2 SELECT value1 as v from default.src " +
s"where key = 1"))
- checkAnswer("bob", "SELECT value1 FROM default.src2", Seq(Row(md5Hex("1")), Row(md5Hex("1"))))
- doAs("bob", sql(s"CREATE TABLE default.src3 (k int, value string) $format"))
+ checkAnswer(bob, "SELECT value1 FROM default.src2", Seq(Row(md5Hex("1")), Row(md5Hex("1"))))
+ doAs(bob, sql(s"CREATE TABLE default.src3 (k int, value string) $format"))
doAs(
- "bob",
+ bob,
sql(s"INSERT INTO default.src3 SELECT key, value1 from default.src " +
s"where key = 1"))
doAs(
- "bob",
+ bob,
sql(s"INSERT INTO default.src3 SELECT key, value1 as v from default.src " +
s"where key = 1"))
- checkAnswer("bob", "SELECT value FROM default.src3", Seq(Row(md5Hex("1")), Row(md5Hex("1"))))
+ checkAnswer(bob, "SELECT value FROM default.src3", Seq(Row(md5Hex("1")), Row(md5Hex("1"))))
}
}
test("join on an unmasked table") {
val s = "SELECT a.value1, b.value1 FROM default.src a" +
" join default.unmasked b on a.value1=b.value1"
- checkAnswer("bob", s, Nil)
- checkAnswer("bob", s, Nil) // just for testing query multiple times, don't delete it
+ checkAnswer(bob, s, Nil)
+ checkAnswer(bob, s, Nil) // just for testing query multiple times, don't delete it
}
test("self join on a masked table") {
val s = "SELECT a.value1, b.value1 FROM default.src a" +
" join default.src b on a.value1=b.value1 where a.key = 1 and b.key = 1 "
- checkAnswer("bob", s, Seq(Row(md5Hex("1"), md5Hex("1"))))
+ checkAnswer(bob, s, Seq(Row(md5Hex("1"), md5Hex("1"))))
// just for testing query multiple times, don't delete it
- checkAnswer("bob", s, Seq(Row(md5Hex("1"), md5Hex("1"))))
+ checkAnswer(bob, s, Seq(Row(md5Hex("1"), md5Hex("1"))))
}
test("self join on a masked table and filter the masked column with original value") {
val s = "SELECT a.value1, b.value1 FROM default.src a" +
" join default.src b on a.value1=b.value1" +
" where a.value1='1' and b.value1='1'"
- checkAnswer("bob", s, Nil)
- checkAnswer("bob", s, Nil) // just for testing query multiple times, don't delete it
+ checkAnswer(bob, s, Nil)
+ checkAnswer(bob, s, Nil) // just for testing query multiple times, don't delete it
}
test("self join on a masked table and filter the masked column with masked value") {
@@ -245,7 +246,7 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
// +- DataMaskingStage0Marker Relation default.src[key#60,value1#61,value2#62,value3#63,value4#64,value5#65] parquet
// +- Project [key#153, md5(cast(cast(value1#154 as string) as binary)) AS value1#148, regexp_replace(regexp_replace(regexp_replace(value2#155, [A-Z], X, 1), [a-z], x, 1), [0-9], n, 1) AS value2#149, regexp_replace(regexp_replace(regexp_replace(value3#156, [A-Z], X, 5), [a-z], x, 5), [0-9], n, 5) AS value3#150, date_trunc(YEAR, value4#157, Some(Asia/Shanghai)) AS value4#151, concat(regexp_replace(regexp_replace(regexp_replace(left(value5#158, (length(value5#158) - 4)), [A-Z], X, 1), [a-z], x, 1), [0-9], n, 1), right(value5#158, 4)) AS value5#152]
// +- Relation default.src[key#153,value1#154,value2#155,value3#156,value4#157,value5#158] parquet
- // checkAnswer("bob", s, Seq(Row(md5Hex("1"), md5Hex("1"))))
+ // checkAnswer(bob, s, Seq(Row(md5Hex("1"), md5Hex("1"))))
//
//
// scalastyle:on
@@ -254,9 +255,9 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
val s2 = "SELECT a.value1, b.value1 FROM default.src a" +
" join default.src b on a.value1=b.value1" +
s" where a.value2='xxxxx' and b.value2='xxxxx'"
- checkAnswer("bob", s2, Seq(Row(md5Hex("1"), md5Hex("1"))))
+ checkAnswer(bob, s2, Seq(Row(md5Hex("1"), md5Hex("1"))))
// just for testing query multiple times, don't delete it
- checkAnswer("bob", s2, Seq(Row(md5Hex("1"), md5Hex("1"))))
+ checkAnswer(bob, s2, Seq(Row(md5Hex("1"), md5Hex("1"))))
}
test("union an unmasked table") {
@@ -267,30 +268,30 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
(SELECT b.value1 FROM default.unmasked b)
) c order by value1
"""
- doAs("bob", sql(s).show)
- checkAnswer("bob", s, Seq(Row("1"), Row("2"), Row("3"), Row("4"), Row("5"), Row(md5Hex("1"))))
+ doAs(bob, sql(s).show)
+ checkAnswer(bob, s, Seq(Row("1"), Row("2"), Row("3"), Row("4"), Row("5"), Row(md5Hex("1"))))
}
test("union a masked table") {
val s = "SELECT a.value1 FROM default.src a where a.key = 1 union" +
" (SELECT b.value1 FROM default.src b where b.key = 1)"
- checkAnswer("bob", s, Seq(Row(md5Hex("1"))))
+ checkAnswer(bob, s, Seq(Row(md5Hex("1"))))
}
test("KYUUBI #3581: permanent view should lookup rule on itself not the raw table") {
assume(isSparkV31OrGreater)
val supported = doAs(
- "perm_view_user",
+ permViewUser,
Try(sql("CREATE OR REPLACE VIEW default.perm_view AS SELECT * FROM default.src")).isSuccess)
assume(supported, s"view support for '$format' has not been implemented yet")
withCleanTmpResources(Seq(("default.perm_view", "view"))) {
checkAnswer(
- "perm_view_user",
+ permViewUser,
"SELECT value1, value2 FROM default.src where key = 1",
Seq(Row(1, "hello")))
checkAnswer(
- "perm_view_user",
+ permViewUser,
"SELECT value1, value2 FROM default.perm_view where key = 1",
Seq(Row(md5Hex("1"), "hello")))
}
@@ -303,7 +304,7 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
val s2 = s"SELECT * FROM default.src where key = 11"
// scalastyle:off
checkAnswer(
- "bob",
+ bob,
s1,
Seq(Row(
10,
@@ -313,7 +314,7 @@ trait DataMaskingTestBase extends AnyFunSuite with SparkSessionProvider with Bef
Timestamp.valueOf("2018-01-01 00:00:00"),
"xxxxxUXXXXUnnnUUUUUUXUUUUUUUUUア叶葉엽")))
checkAnswer(
- "bob",
+ bob,
s2,
Seq(Row(
11,
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringTestBase.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringTestBase.scala
index a73690724e4..3236c97b1c0 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringTestBase.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/ranger/rowfiltering/RowFilteringTestBase.scala
@@ -24,6 +24,7 @@ import org.apache.spark.sql.{Row, SparkSessionExtensions}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.funsuite.AnyFunSuite
+import org.apache.kyuubi.plugin.spark.authz.RangerTestUsers._
import org.apache.kyuubi.plugin.spark.authz.SparkSessionProvider
import org.apache.kyuubi.plugin.spark.authz.ranger.RangerSparkExtension
@@ -47,72 +48,72 @@ trait RowFilteringTestBase extends AnyFunSuite with SparkSessionProvider with Be
}
override def beforeAll(): Unit = {
- doAs("admin", setup())
+ doAs(admin, setup())
super.beforeAll()
}
override def afterAll(): Unit = {
- doAs("admin", cleanup())
+ doAs(admin, cleanup())
spark.stop
super.afterAll()
}
test("user without row filtering rule") {
checkAnswer(
- "kent",
+ kent,
"SELECT key FROM default.src order order by key",
Seq(Row(1), Row(20), Row(30)))
}
test("simple query projecting filtering column") {
- checkAnswer("bob", "SELECT key FROM default.src", Seq(Row(1)))
+ checkAnswer(bob, "SELECT key FROM default.src", Seq(Row(1)))
}
test("simple query projecting non filtering column") {
- checkAnswer("bob", "SELECT value FROM default.src", Seq(Row(1)))
+ checkAnswer(bob, "SELECT value FROM default.src", Seq(Row(1)))
}
test("simple query projecting non filtering column with udf max") {
- checkAnswer("bob", "SELECT max(value) FROM default.src", Seq(Row(1)))
+ checkAnswer(bob, "SELECT max(value) FROM default.src", Seq(Row(1)))
}
test("simple query projecting non filtering column with udf coalesce") {
- checkAnswer("bob", "SELECT coalesce(max(value), 1) FROM default.src", Seq(Row(1)))
+ checkAnswer(bob, "SELECT coalesce(max(value), 1) FROM default.src", Seq(Row(1)))
}
test("in subquery") {
checkAnswer(
- "bob",
+ bob,
"SELECT value FROM default.src WHERE value in (SELECT value as key FROM default.src)",
Seq(Row(1)))
}
test("ctas") {
withCleanTmpResources(Seq(("default.src2", "table"))) {
- doAs("bob", sql(s"CREATE TABLE default.src2 $format AS SELECT value FROM default.src"))
+ doAs(bob, sql(s"CREATE TABLE default.src2 $format AS SELECT value FROM default.src"))
val query = "select value from default.src2"
- checkAnswer("admin", query, Seq(Row(1)))
- checkAnswer("bob", query, Seq(Row(1)))
+ checkAnswer(admin, query, Seq(Row(1)))
+ checkAnswer(bob, query, Seq(Row(1)))
}
}
test("[KYUUBI #3581]: row level filter on permanent view") {
assume(isSparkV31OrGreater)
val supported = doAs(
- "perm_view_user",
+ permViewUser,
Try(sql("CREATE OR REPLACE VIEW default.perm_view AS SELECT * FROM default.src")).isSuccess)
assume(supported, s"view support for '$format' has not been implemented yet")
withCleanTmpResources(Seq((s"default.perm_view", "view"))) {
checkAnswer(
- "admin",
+ admin,
"SELECT key FROM default.perm_view order by key",
Seq(Row(1), Row(20), Row(30)))
- checkAnswer("bob", "SELECT key FROM default.perm_view", Seq(Row(1)))
- checkAnswer("bob", "SELECT value FROM default.perm_view", Seq(Row(1)))
- checkAnswer("bob", "SELECT max(value) FROM default.perm_view", Seq(Row(1)))
- checkAnswer("bob", "SELECT coalesce(max(value), 1) FROM default.perm_view", Seq(Row(1)))
+ checkAnswer(bob, "SELECT key FROM default.perm_view", Seq(Row(1)))
+ checkAnswer(bob, "SELECT value FROM default.perm_view", Seq(Row(1)))
+ checkAnswer(bob, "SELECT max(value) FROM default.perm_view", Seq(Row(1)))
+ checkAnswer(bob, "SELECT coalesce(max(value), 1) FROM default.perm_view", Seq(Row(1)))
checkAnswer(
- "bob",
+ bob,
"SELECT value FROM default.perm_view WHERE value in " +
"(SELECT value as key FROM default.perm_view)",
Seq(Row(1)))
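The string-to-constant migration above relies on the `RangerTestUsers._` import added at the top of the suite. As a rough sketch only, the shared constants object presumably looks something like the following, with values mirroring the string literals replaced in the diff (any field beyond those visible here is an assumption, not the actual Kyuubi source):

```scala
// Hypothetical sketch of the shared test-user constants inferred from the
// replacements above; not the actual Kyuubi source.
object RangerTestUsers {
  val admin = "admin"
  val bob = "bob"
  val kent = "kent"
  val permViewUser = "perm_view_user"
}
```

Centralizing the user names this way turns a typo into a compile error instead of a silently failing Ranger policy lookup.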
From cba1be97399c2f8370fd1120059916511591e2cf Mon Sep 17 00:00:00 2001
From: packyan
Date: Thu, 20 Apr 2023 09:41:08 +0800
Subject: [PATCH 057/404] [KYUUBI #4717] [AUTHZ] Check Authz plugin's spec json
files in UT
### _Why are the changes needed?_
to close #4715
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4717 from packyan/imporve_authz_spec_json_should_be_generated_in_each_build.
Closes #4717
88e70daa7 [Deng An] Update JsonSpecFileGenerator.scala
d195a6db7 [Deng An] Merge branch 'master' into imporve_authz_spec_json_should_be_generated_in_each_build
a078c8c53 [packyan] add ut for check or generate spec json files.
Lead-authored-by: packyan
Co-authored-by: Deng An <36296995+packyan@users.noreply.github.com>
Co-authored-by: Deng An
Signed-off-by: liangbowen
---
.../authz/gen/JsonSpecFileGenerator.scala | 55 ++++++++++++++-----
1 file changed, 41 insertions(+), 14 deletions(-)
diff --git a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala
index 7c7ed138b27..c95685f3427 100644
--- a/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala
+++ b/extensions/spark/kyuubi-spark-authz/src/test/scala/org/apache/kyuubi/plugin/spark/authz/gen/JsonSpecFileGenerator.scala
@@ -18,37 +18,64 @@
package org.apache.kyuubi.plugin.spark.authz.gen
import java.nio.charset.StandardCharsets
-import java.nio.file.{Files, Paths}
+import java.nio.file.{Files, Paths, StandardOpenOption}
+
+import org.apache.commons.io.FileUtils
+//scalastyle:off
+import org.scalatest.funsuite.AnyFunSuite
import org.apache.kyuubi.plugin.spark.authz.serde.{mapper, CommandSpec}
/**
* Generates the default command specs to src/main/resources dir.
*
- * Usage:
- * mvn scala:run -DmainClass=this class -pl :kyuubi-spark-authz_2.12
+ * To run the test suite:
+ * build/mvn clean test -Pgen-policy -pl :kyuubi-spark-authz_2.12 -Dtest=none
+ * -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.gen.JsonSpecFileGenerator
+ *
+ * To regenerate the ranger policy file:
+ * KYUUBI_UPDATE=1 build/mvn clean test -Pgen-policy -pl :kyuubi-spark-authz_2.12 -Dtest=none
+ * -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.gen.JsonSpecFileGenerator
*/
-object JsonSpecFileGenerator {
- def main(args: Array[String]): Unit = {
+class JsonSpecFileGenerator extends AnyFunSuite {
+ // scalastyle:on
+ test("check spec json files") {
writeCommandSpecJson("database", DatabaseCommands.data)
writeCommandSpecJson("table", TableCommands.data ++ IcebergCommands.data)
writeCommandSpecJson("function", FunctionCommands.data)
writeCommandSpecJson("scan", Scans.data)
}
- def writeCommandSpecJson[T <: CommandSpec](commandType: String, specArr: Array[T]): Unit = {
+ def writeCommandSpecJson[T <: CommandSpec](
+ commandType: String,
+ specArr: Array[T]): Unit = {
val pluginHome = getClass.getProtectionDomain.getCodeSource.getLocation.getPath
.split("target").head
val filename = s"${commandType}_command_spec.json"
- val writer = {
- val p = Paths.get(pluginHome, "src", "main", "resources", filename)
- Files.newBufferedWriter(p, StandardCharsets.UTF_8)
+ val filePath = Paths.get(pluginHome, "src", "main", "resources", filename)
+
+ val generatedStr = mapper.writerWithDefaultPrettyPrinter()
+ .writeValueAsString(specArr.sortBy(_.classname))
+
+ if (sys.env.get("KYUUBI_UPDATE").contains("1")) {
+ // scalastyle:off println
+ println(s"writing ${specArr.length} specs to $filename")
+ // scalastyle:on println
+ Files.write(
+ filePath,
+ generatedStr.getBytes(StandardCharsets.UTF_8),
+ StandardOpenOption.CREATE,
+ StandardOpenOption.TRUNCATE_EXISTING)
+ } else {
+ val existedFileContent =
+ FileUtils.readFileToString(filePath.toFile, StandardCharsets.UTF_8)
+ withClue(s"Check $filename failed. Please regenerate the ranger policy file by running "
+ + "`KYUUBI_UPDATE=1 build/mvn clean test -Pgen-policy"
+ + " -pl :kyuubi-spark-authz_2.12 -Dtest=none"
+ + " -DwildcardSuites=org.apache.kyuubi.plugin.spark.authz.gen.JsonSpecFileGenerator`.") {
+ assert(generatedStr.equals(existedFileContent))
+ }
}
- // scalastyle:off println
- println(s"writing ${specArr.length} specs to $filename")
- // scalastyle:on println
- mapper.writerWithDefaultPrettyPrinter().writeValue(writer, specArr.sortBy(_.classname))
- writer.close()
}
}
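The converted suite above is a golden-file check: with `KYUUBI_UPDATE=1` set it rewrites the committed spec JSON, otherwise it asserts that freshly generated content matches the committed file. A condensed, self-contained sketch of that pattern, with illustrative names, paths, and messages rather than the actual Kyuubi code:

```scala
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths}

object GoldenFileCheck {
  // Regenerate the golden file in update mode; otherwise fail on drift.
  def checkOrUpdate(path: String, generated: String): Unit = {
    val p = Paths.get(path)
    if (sys.env.get("KYUUBI_UPDATE").contains("1")) {
      // Update mode: overwrite the committed golden file.
      Files.write(p, generated.getBytes(StandardCharsets.UTF_8))
    } else {
      // Check mode: the build fails if generated output drifts from the file.
      val committed = new String(Files.readAllBytes(p), StandardCharsets.UTF_8)
      assert(generated == committed, s"$path is stale; rerun with KYUUBI_UPDATE=1")
    }
  }
}
```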
From c6571344b9623f7db7acaeb04bfcc2eeecb493e1 Mon Sep 17 00:00:00 2001
From: Kent Yao
Date: Thu, 20 Apr 2023 17:44:15 +0800
Subject: [PATCH 058/404] [KYUUBI #4737] Restore Project & Community Status in
README.md
### _Why are the changes needed?_
This PR adds back those Project & Community Status badges to readme
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
https://github.com/apache/kyuubi/tree/yaooqinn-patch-2#project--community-status
Closes #4737 from yaooqinn/yaooqinn-patch-2.
Closes #4737
128f802fc [Kent Yao] Restore Project & Community Status in README.md
Authored-by: Kent Yao
Signed-off-by: Cheng Pan
---
README.md | 55 ++++++++++++++++++++++++++++++++++---------------------
1 file changed, 34 insertions(+), 21 deletions(-)
diff --git a/README.md b/README.md
index 43efc4c66d3..976d2d7c769 100644
--- a/README.md
+++ b/README.md
@@ -20,20 +20,20 @@
@@ -49,8 +49,6 @@
Apache Kyuubi™ is a distributed and multi-tenant gateway to provide serverless
SQL on data warehouses and lakehouses.
-
-
## What is Kyuubi?
Kyuubi provides a pure SQL gateway through Thrift JDBC/ODBC interface for end-users to manipulate large-scale data with pre-programmed and extensible Spark SQL engines. This "out-of-the-box" model minimizes the barriers and costs for end-users to use Spark at the client side. At the server-side, Kyuubi server and engines' multi-tenant architecture provides the administrators a way to achieve computing resource isolation, data security, high availability, high client concurrency, etc.
@@ -105,11 +103,7 @@ and others would not be possible without your help.
![](./docs/imgs/kyuubi_ecosystem.drawio.png)
-## Online Documentation
-
-Since Kyuubi 1.3.0-incubating, the Kyuubi online documentation is hosted by [https://kyuubi.apache.org/](https://kyuubi.apache.org/).
-You can find the latest Kyuubi documentation on [this web page](https://kyuubi.readthedocs.io/en/master/).
-For 1.2 and earlier versions, please check the [Readthedocs](https://kyuubi.readthedocs.io/en/v1.2.0/) directly.
+## Online Documentation
## Quick Start
@@ -117,9 +111,32 @@ Ready? [Getting Started](https://kyuubi.readthedocs.io/en/master/quick_start/) w
## [Contributing](./CONTRIBUTING.md)
-## Contributor over time
+## Project & Community Status
-[![Contributor over time](https://contributor-graph-api.apiseven.com/contributors-svg?chart=contributorOverTime&repo=apache/kyuubi)](https://api7.ai/contributor-graph?chart=contributorOverTime&repo=apache/kyuubi)
+
diff --git a/docs/develop_tools/developer.md b/docs/contributing/code/developer.md
similarity index 91%
rename from docs/develop_tools/developer.md
rename to docs/contributing/code/developer.md
index 329e219de46..8b52057e828 100644
--- a/docs/develop_tools/developer.md
+++ b/docs/contributing/code/developer.md
@@ -24,16 +24,6 @@
build/mvn versions:set -DgenerateBackupPoms=false
```
-## Update Document Version
-
-Whenever project version updates, please also update the document version at `docs/conf.py` to target the upcoming release.
-
-For example,
-
-```python
-release = '1.2.0'
-```
-
## Update Dependency List
Kyuubi uses the `dev/dependencyList` file to indicate what upstream dependencies will actually go to the server-side classpath.
diff --git a/docs/develop_tools/distribution.md b/docs/contributing/code/distribution.md
similarity index 98%
rename from docs/develop_tools/distribution.md
rename to docs/contributing/code/distribution.md
index 217f0a4178d..23c9c6542de 100644
--- a/docs/develop_tools/distribution.md
+++ b/docs/contributing/code/distribution.md
@@ -15,7 +15,7 @@
- limitations under the License.
-->
-# Building a Runnable Distribution
+# Building A Runnable Distribution
To create a Kyuubi distribution like those distributed by [Kyuubi Release Page](https://kyuubi.apache.org/releases.html),
and that is laid out to be runnable, use `./build/dist` in the project root directory.
diff --git a/docs/contributing/code/get_started.rst b/docs/contributing/code/get_started.rst
new file mode 100644
index 00000000000..33981a8cd6d
--- /dev/null
+++ b/docs/contributing/code/get_started.rst
@@ -0,0 +1,70 @@
+.. Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+Get Started
+===========
+
+Good First Issues
+-----------------
+
+.. image:: https://img.shields.io/github/issues/apache/kyuubi/good%20first%20issue?color=green&label=Good%20first%20issue&logo=gfi&logoColor=red&style=for-the-badge
+ :alt: GitHub issues by-label
+ :target: `Good First Issues`_
+
+**Good First Issue** is an initiative to curate easy pickings for first-time
+contributors. It helps you locate suitable development tasks that require
+only beginner-level skills, and finally make your first contribution to Kyuubi.
+
+After solving one or more good first issues, you should be able to
+
+- Find efficient ways to communicate with the community and get help
+- Set up the `develop environment`_ on your machine
+- `Build`_ Kyuubi from source
+- `Run tests`_ locally
+- `Submit a pull request`_ through Github
+- Be listed in `Apache Kyuubi contributors`_
+- And most importantly, you can move to the next level and try some tricky issues
+
+.. note:: Don't linger too long at this stage.
+ :class: dropdown, toggle
+
+Help Wanted Issues
+------------------
+
+.. image:: https://img.shields.io/github/issues/apache/kyuubi/help%20wanted?color=brightgreen&label=HELP%20WANTED&style=for-the-badge
+ :alt: GitHub issues by-label
+ :target: `Help Wanted Issues`_
+
+Issues that maintainers labeled as help wanted are mostly
+
+- sub-tasks of an ongoing umbrella issue that is short of hands
+- non-urgent improvements
+- bug fixes for corner cases
+- feature requests not covered by the current technology stack of the Kyuubi community
+
+Since these problems are not urgent, you can take your time when fixing them.
+
+.. note:: Help wanted issues may contain easy pickings and tricky ones.
+ :class: dropdown, toggle
+
+
+.. _Good First Issues: https://github.com/apache/kyuubi/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22
+.. _develop environment: idea_setup.html
+.. _Build: build.html
+.. _Run tests: testing.html
+.. _Submit a pull request: https://kyuubi.apache.org/pull_request.html
+.. _Apache Kyuubi contributors: https://github.com/apache/kyuubi/graphs/contributors
+.. _Help Wanted Issues: https://github.com/apache/kyuubi/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22
+
diff --git a/docs/develop_tools/idea_setup.md b/docs/contributing/code/idea_setup.md
similarity index 100%
rename from docs/develop_tools/idea_setup.md
rename to docs/contributing/code/idea_setup.md
diff --git a/docs/develop_tools/index.rst b/docs/contributing/code/index.rst
similarity index 84%
rename from docs/develop_tools/index.rst
rename to docs/contributing/code/index.rst
index c56321cb379..25a6e421baa 100644
--- a/docs/develop_tools/index.rst
+++ b/docs/contributing/code/index.rst
@@ -13,15 +13,19 @@
See the License for the specific language governing permissions and
limitations under the License.
-Develop Tools
-=============
+Contributing Code
+=================
+
+These sections explain the process, guidelines, and tools for contributing
+code to the Kyuubi project.
.. toctree::
:maxdepth: 2
+ get_started
+ style
building
distribution
- build_document
testing
debugging
developer
diff --git a/docs/contributing/code/style.rst b/docs/contributing/code/style.rst
new file mode 100644
index 00000000000..d967e895971
--- /dev/null
+++ b/docs/contributing/code/style.rst
@@ -0,0 +1,39 @@
+.. Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+Code Style Guide
+================
+
+Code is written once by its author, but read and modified multiple times by
+lots of other engineers. As most bugs actually come from future modification
+of the code, we need to optimize our codebase for long-term, global
+readability and maintainability. The best way to achieve this is to write
+simple code.
+
+Kyuubi's source code is multilingual; a specific code style is applied to
+each corresponding language.
+
+Scala Coding Style Guide
+------------------------
+
+Kyuubi adopts the `Databricks Scala Coding Style Guide`_ for Scala code.
+
+Java Coding Style Guide
+-----------------------
+
+Kyuubi adopts the `Google Java style`_ for Java code.
+
+.. _Databricks Scala Coding Style Guide: https://github.com/databricks/scala-style-guide
+.. _Google Java style: https://google.github.io/styleguide/javaguide.html
\ No newline at end of file
diff --git a/docs/develop_tools/testing.md b/docs/contributing/code/testing.md
similarity index 100%
rename from docs/develop_tools/testing.md
rename to docs/contributing/code/testing.md
diff --git a/docs/contributing/doc/build.rst b/docs/contributing/doc/build.rst
new file mode 100644
index 00000000000..4ec2362f350
--- /dev/null
+++ b/docs/contributing/doc/build.rst
@@ -0,0 +1,96 @@
+.. Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+Building Documentation
+======================
+
+Follow the steps below to learn how to build the Kyuubi documentation like
+the one you are reading now.
+
+Setup Environment
+-----------------
+
+- First, install ``virtualenv``. This is optional but recommended, as it
+  creates an isolated environment that helps resolve dependency issues when
+  building the documentation.
+
+.. code-block:: sh
+ :caption: Install virtualenv
+
+ $ pip install virtualenv
+
+- Switch to the ``docs`` root directory.
+
+.. code-block:: sh
+ :caption: Switch to docs
+
+ $ cd $KYUUBI_SOURCE_PATH/docs
+
+- Create a virtual environment named 'kyuubi' (or anything you like) using
+  ``virtualenv`` if it does not already exist.
+
+.. code-block:: sh
+ :caption: New virtual environment
+
+ $ virtualenv kyuubi
+
+- Activate the virtual environment.
+
+.. code-block:: sh
+ :caption: Activate virtual environment
+
+ $ source ./kyuubi/bin/activate
+
+Install All Dependencies
+------------------------
+
+Install all dependencies enumerated in the ``requirements.txt``.
+
+.. code-block:: sh
+ :caption: Install dependencies
+
+ $ pip install -r requirements.txt
+
+
+Create Documentation
+--------------------
+
+Make sure you are in the ``$KYUUBI_SOURCE_PATH/docs`` directory.
+
+Linux & macOS
+~~~~~~~~~~~~~
+
+.. code-block:: sh
+ :caption: Sphinx build on Unix-like OS
+
+ $ make html
+
+Windows
+~~~~~~~
+
+.. code-block:: sh
+ :caption: Sphinx build on Windows
+
+ $ make.bat html
+
+
+If the build process succeeds, the HTML pages are in
+``$KYUUBI_SOURCE_PATH/docs/_build/html``.
+
+View Locally
+------------
+
+Open the ``$KYUUBI_SOURCE_PATH/docs/_build/html/index.html`` file in your
+favorite web browser.
diff --git a/docs/contributing/doc/get_started.rst b/docs/contributing/doc/get_started.rst
new file mode 100644
index 00000000000..f262695b777
--- /dev/null
+++ b/docs/contributing/doc/get_started.rst
@@ -0,0 +1,117 @@
+.. Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+Get Started
+===========
+
+.. image:: https://img.shields.io/github/issues/apache/kyuubi/kind:documentation?color=green&logo=gfi&logoColor=red&style=for-the-badge
+ :alt: GitHub issues by-label
+
+
+Trivial Fixes
+-------------
+
+For typos, layout, grammar, spelling, punctuation errors and other similar issues
+or changes that occur within a single file, it is acceptable to make edits directly
+on the page being viewed. When viewing a source file on Kyuubi's
+`Github repository`_, a simple click on the ``edit icon`` or keyboard shortcut
+``e`` will activate the editor. Similarly, when viewing files on `Read The Docs`_
+platform, clicking on the ``suggest edit`` button will lead you to the editor.
+These methods do not require any local development environment setup and
+are convenient for making quick fixes.
+
+Upon completion of the editing process, choose the ``commit changes`` option,
+adhere to the provided instructions to submit a pull request,
+and await feedback from the designated reviewer.
+
+Major Fixes
+-----------
+
+For significant modifications that affect multiple files, it is advisable to
+clone the repository to a local development environment, implement the necessary
+changes, and conduct thorough testing prior to submitting a pull request.
+
+
+`Fork`_ The Repository
+~~~~~~~~~~~~~~~~~~~~~~
+
+Clone The Forked Repository
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block::
+ :caption: Clone the repository
+
+ $ git clone https://github.com/your_username/kyuubi.git
+
+Replace "your_username" with your GitHub username. This will create a local
+copy of your forked repository on your machine. You will see the ``master``
+branch if you run ``git branch`` in the ``kyuubi`` folder.
+
+Create A New Branch
+~~~~~~~~~~~~~~~~~~~
+
+.. code-block::
+ :caption: Create a new branch
+
+ $ git checkout -b guide
+ Switched to a new branch 'guide'
+
+Editing And Testing
+~~~~~~~~~~~~~~~~~~~
+
+Make the necessary changes to the documentation files using a text editor.
+`Build and verify`_ the changes you have made to see if they look fine.
+
+.. note::
+ :class: dropdown, toggle
+
+Create A Pull Request
+~~~~~~~~~~~~~~~~~~~~~
+
+Once you have made the changes,
+
+- Commit them with a descriptive commit message using the command:
+
+.. code-block::
+ :caption: commit the changes
+
+ $ git commit -m "Description of changes made"
+
+- Push the changes to your forked repository using the command
+
+.. code-block::
+ :caption: push the changes
+
+ $ git push origin guide
+
+- `Create A Pull Request`_ with a descriptive PR title and description.
+
+- Polish the PR, addressing any review comments
+
+Report Only
+-----------
+
+If you don't have time to fix the doc issue and submit a pull request on your own,
+`reporting a document issue`_ also helps. Please follow some basic rules:
+
+- Use the title field to clearly describe the issue
+- Choose the documentation report template
+- Fill out the required fields in the documentation report
+
+.. _Home Page: https://kyuubi.apache.org
+.. _Fork: https://github.com/apache/kyuubi/fork
+.. _Build and verify: build.html
+.. _Create A Pull Request: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request
+.. _reporting a document issue: https://github.com/apache/kyuubi/issues/new/choose
\ No newline at end of file
diff --git a/docs/contributing/doc/index.rst b/docs/contributing/doc/index.rst
new file mode 100644
index 00000000000..bf6ae41bde2
--- /dev/null
+++ b/docs/contributing/doc/index.rst
@@ -0,0 +1,44 @@
+.. Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+Contributing Documentations
+===========================
+
+The project documentation is crucial for users and contributors. This guide
+outlines the contribution guidelines for Apache Kyuubi documentation.
+
+Kyuubi's documentation source files are maintained in the same `github repository`_
+as the code base, which ensures code and documentation are updated in sync.
+All documentation source files can be found in the sub-folder named ``docs``.
+
+Kyuubi's documentation is published and hosted on the `Read The Docs`_
+platform by version, with each version having its own dedicated page. To
+access a specific version of the document, simply navigate to the "Docs"
+tab on our `Home Page`_.
+
+We welcome any contributions to the documentation, including but not limited
+to writing, translating, reporting doc issues on GitHub, and reposting.
+
+
+.. toctree::
+ :maxdepth: 2
+
+ get_started
+ style
+ build
+
+.. _Github repository: https://github.com/apache/kyuubi
+.. _Restructured Text: https://en.wikipedia.org/wiki/ReStructuredText
+.. _Read The Docs: https://kyuubi.rtfd.io
+.. _Home Page: https://kyuubi.apache.org
\ No newline at end of file
diff --git a/docs/contributing/doc/style.rst b/docs/contributing/doc/style.rst
new file mode 100644
index 00000000000..14cc2b8ac78
--- /dev/null
+++ b/docs/contributing/doc/style.rst
@@ -0,0 +1,135 @@
+.. Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+Documentation Style Guide
+=========================
+
+This guide contains guidelines, not rules. While guidelines are important
+to follow, they are not hard and fast rules. It's important to use your
+own judgement and discretion when creating content, and to depart from the
+guidelines when necessary to improve the quality and effectiveness of your
+content. Ultimately, the goal is to create content that is clear, concise,
+and useful to your audience, and sometimes deviating from the guidelines
+may be necessary to achieve that goal.
+
+Goals
+-----
+
+- Source text files are readable and portable
+- Source diagram files are editable
+- Source files are maintainable over time and across community
+
+License Header
+--------------
+
+All original documents should include the ASF license header. All reproduced
+or quoted content should be authorized and attributed to the source.
+
+If you are about to quote some content from commercial materials, please refer to
+`ASF 3RD PARTY LICENSE POLICY`_, or consult the Apache Kyuubi PMC to avoid
+legality issues.
+
+General Style
+-------------
+
+- Use `ReStructuredText`_ or `Markdown`_ format for text, avoid HTML hacks
+- Use `draw.io`_ for drawing or editing an image, and export it as PNG for
+  referencing in documents. A pull request should commit both files
+- Use Kyuubi for short instead of Apache Kyuubi after the first mention on
+  the same page
+- Character line limit: 78, except unbreakable ones
+- Prefer lists to tables
+- Prefer unordered lists to ordered ones
+
+ReStructuredText
+----------------
+
+Headings
+~~~~~~~~
+
+- Use **Pascal Case**: every word starts with an uppercase letter,
+ e.g., 'Documentation Style Guide'
+- Use a max of **three levels**
+ - Split into multiple files when an H4 would otherwise appear
+ - Prefer `directive rubric`_ over H4
+- Use underline-only adornment styles, **DO NOT** use overline
+ - The length of underline characters **SHOULD** match the title
+ - H1 should be underlined with '='
+ - H2 should be underlined with '-'
+ - H3 should be underlined with '~'
+ - H4 should be underlined with '^', but it's better to avoid using H4
+- **DO NOT** use numbering for sections
+- **DO NOT** use "Kyuubi" in titles if possible
+
+Links
+~~~~~
+
+- Define links with short descriptive phrases, and group them at the bottom of the file
+
+.. note::
+ :class: dropdown, toggle
+
+ .. code-block::
+ :caption: Recommended
+
+ Please refer to `Apache Kyuubi Home Page`_.
+
+ .. _Apache Kyuubi Home Page: https://kyuubi.apache.org/
+
+ .. code-block::
+ :caption: Not recommended
+
+ Please refer to `Apache Kyuubi Home Page <https://kyuubi.apache.org/>`_.
+
+
+Markdown
+--------
+
+Headings
+~~~~~~~~
+
+- Use **Pascal Case**: every word starts with an uppercase letter,
+ e.g., 'Documentation Style Guide'
+- Use a max of **three levels**
+ - Split into multiple files when an H4 would otherwise appear
+- **DO NOT** use numbering for sections
+- **DO NOT** use "Kyuubi" in titles if possible
+
+Images
+------
+
+Use images only when they provide helpful visual explanations of information
+otherwise difficult to express with words.
+
+Third-Party References
+----------------------
+
+If the preceding references don't provide explicit guidance, then see these
+third-party references, depending on the nature of your question:
+
+- `Google developer documentation style`_
+- `Apple Style Guide`_
+- `Red Hat supplementary style guide for product documentation`_
+
+.. References
+
+.. _ASF 3RD PARTY LICENSE POLICY: https://www.apache.org/legal/resolved.html#asf-3rd-party-license-policy
+.. _directive rubric: https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html#directive-rubric
+.. _ReStructuredText: https://docutils.sourceforge.io/rst.html
+.. _Markdown: https://en.wikipedia.org/wiki/Markdown
+.. _draw.io: https://www.diagrams.net/
+.. _Google developer documentation style: https://developers.google.com/style
+.. _Apple Style Guide: https://help.apple.com/applestyleguide/
+.. _Red Hat supplementary style guide for product documentation: https://redhat-documentation.github.io/supplementary-style-guide/
diff --git a/docs/develop_tools/build_document.md b/docs/develop_tools/build_document.md
deleted file mode 100644
index 0be5a180705..00000000000
--- a/docs/develop_tools/build_document.md
+++ /dev/null
@@ -1,76 +0,0 @@
-
-
-# Building Kyuubi Documentation
-
-Follow the steps below and learn how to build the Kyuubi documentation as the one you are watching now.
-
-## Install & Activate `virtualenv`
-
-Firstly, install `virtualenv`, this is optional but recommended as it is useful to create an independent environment to resolve dependency issues for building the documentation.
-
-```bash
-pip install virtualenv
-```
-
-Switch to the `docs` root directory.
-
-```bash
-cd $KYUUBI_SOURCE_PATH/docs
-```
-
-Create a virtual environment named 'kyuubi' or anything you like using `virtualenv` if it's not existing.
-
-```bash
-virtualenv kyuubi
-```
-
-Activate it,
-
-```bash
-source ./kyuubi/bin/activate
-```
-
-## Install all dependencies
-
-Install all dependencies enumerated in the `requirements.txt`.
-
-```bash
-pip install -r requirements.txt
-```
-
-## Create Documentation
-
-Make sure you are in the `$KYUUBI_SOURCE_PATH/docs` directory.
-
-linux & macos
-
-```bash
-make html
-```
-
-windows
-
-```bash
-make.bat html
-```
-
-If the build process succeed, the HTML pages are in `$KYUUBI_SOURCE_PATH/docs/_build/html`.
-
-## View Locally
-
-Open the `$KYUUBI_SOURCE_PATH/docs/_build/html/index.html` file in your favorite web browser.
diff --git a/docs/index.rst b/docs/index.rst
index fbd299e7b86..7f6e0a2486a 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -216,7 +216,13 @@ What's Next
:caption: Contributing
:maxdepth: 2
- develop_tools/index
+ contributing/code/index
+ contributing/doc/index
+
+.. toctree::
+ :caption: Community
+ :maxdepth: 2
+
community/index
.. toctree::
diff --git a/docs/requirements.txt b/docs/requirements.txt
index ecc8116e77d..8e1f5c47119 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -24,3 +24,5 @@ sphinx-book-theme==0.3.3
sphinx-markdown-tables==0.0.17
sphinx-notfound-page==0.8.3
sphinx-togglebutton===0.3.2
+sphinxemoji===0.2.0
+sphinx-copybutton===0.5.2
From 4762edc6228f0e1a405f4e0d6b4beaca7d4c63e1 Mon Sep 17 00:00:00 2001
From: zwangsheng <2213335496@qq.com>
Date: Thu, 27 Apr 2023 20:33:43 +0800
Subject: [PATCH 085/404] [KYUUBI #4757][UI] Move Statistics to Management Menu
### _Why are the changes needed?_
Close #4757
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4758 from zwangsheng/KYUUBI_4757.
Closes #4757
2daa5fc84 [zwangsheng] fix comments
b1d5177ce [zwangsheng] [KYUUBI #4757][UI] Move Statistics to Management Menu
Authored-by: zwangsheng <2213335496@qq.com>
Signed-off-by: Cheng Pan
---
.../web-ui/src/router/engine/index.ts | 26 -------------
kyuubi-server/web-ui/src/router/index.ts | 8 +---
.../router/{server => management}/index.ts | 21 ++++++++--
.../web-ui/src/router/operation/index.ts | 5 ---
.../web-ui/src/router/session/index.ts | 26 -------------
.../views/layout/components/aside/types.ts | 39 +++++++------------
.../views/{ => management}/engine/index.vue | 0
.../operation}/index.vue | 0
.../server}/index.vue | 0
.../session}/index.vue | 0
10 files changed, 33 insertions(+), 92 deletions(-)
delete mode 100644 kyuubi-server/web-ui/src/router/engine/index.ts
rename kyuubi-server/web-ui/src/router/{server => management}/index.ts (62%)
delete mode 100644 kyuubi-server/web-ui/src/router/session/index.ts
rename kyuubi-server/web-ui/src/views/{ => management}/engine/index.vue (100%)
rename kyuubi-server/web-ui/src/views/{operation/operation-statistics => management/operation}/index.vue (100%)
rename kyuubi-server/web-ui/src/views/{server/server-statistics => management/server}/index.vue (100%)
rename kyuubi-server/web-ui/src/views/{session/session-statistics => management/session}/index.vue (100%)
diff --git a/kyuubi-server/web-ui/src/router/engine/index.ts b/kyuubi-server/web-ui/src/router/engine/index.ts
deleted file mode 100644
index 22b056a32ed..00000000000
--- a/kyuubi-server/web-ui/src/router/engine/index.ts
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-const routes = [
- {
- path: '/engine/engine-statistics',
- name: 'engine-statistics',
- component: () => import('@/views/engine/index.vue')
- }
-]
-
-export default routes
diff --git a/kyuubi-server/web-ui/src/router/index.ts b/kyuubi-server/web-ui/src/router/index.ts
index ce9960ba6aa..cad8317051a 100644
--- a/kyuubi-server/web-ui/src/router/index.ts
+++ b/kyuubi-server/web-ui/src/router/index.ts
@@ -20,9 +20,7 @@ import overviewRoutes from './overview'
import workloadRoutes from './workload'
import operationRoutes from './operation'
import contactRoutes from './contact'
-import sessionRoutes from './session'
-import engineRoutes from './engine'
-import serverRoutes from './server'
+import managementRoutes from './management'
const routes = [
{
@@ -39,11 +37,9 @@ const routes = [
redirect: 'overview',
children: [
...overviewRoutes,
- ...sessionRoutes,
...workloadRoutes,
...operationRoutes,
- ...engineRoutes,
- ...serverRoutes,
+ ...managementRoutes,
...contactRoutes
]
}
diff --git a/kyuubi-server/web-ui/src/router/server/index.ts b/kyuubi-server/web-ui/src/router/management/index.ts
similarity index 62%
rename from kyuubi-server/web-ui/src/router/server/index.ts
rename to kyuubi-server/web-ui/src/router/management/index.ts
index c0624048505..a87ff605216 100644
--- a/kyuubi-server/web-ui/src/router/server/index.ts
+++ b/kyuubi-server/web-ui/src/router/management/index.ts
@@ -17,9 +17,24 @@
const router = [
{
- path: '/server/server-statistics',
- name: 'server-statistics',
- component: () => import('@/views/server/server-statistics/index.vue')
+ path: '/management/engine',
+ name: 'engine',
+ component: () => import('@/views/management/engine/index.vue')
+ },
+ {
+ path: '/management/server',
+ name: 'server',
+ component: () => import('@/views/management/server/index.vue')
+ },
+ {
+ path: '/management/session',
+ name: 'session',
+ component: () => import('@/views/management/session/index.vue')
+ },
+ {
+ path: '/management/operation',
+ name: 'operation',
+ component: () => import('@/views/management/operation/index.vue')
}
]
diff --git a/kyuubi-server/web-ui/src/router/operation/index.ts b/kyuubi-server/web-ui/src/router/operation/index.ts
index 8d6dfbd9111..03ba4c28575 100644
--- a/kyuubi-server/web-ui/src/router/operation/index.ts
+++ b/kyuubi-server/web-ui/src/router/operation/index.ts
@@ -25,11 +25,6 @@ const routes = [
path: '/operation/completedJobs',
name: 'operation-completedJobs',
component: () => import('@/views/operation/completedJobs/index.vue')
- },
- {
- path: '/operation/operation-statistics',
- name: 'operation-statistics',
- component: () => import('@/views/operation/operation-statistics/index.vue')
}
]
diff --git a/kyuubi-server/web-ui/src/router/session/index.ts b/kyuubi-server/web-ui/src/router/session/index.ts
deleted file mode 100644
index fca49f21101..00000000000
--- a/kyuubi-server/web-ui/src/router/session/index.ts
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-const routes = [
- {
- path: '/session/session-statistics',
- name: 'session-statistics',
- component: () => import('@/views/session/session-statistics/index.vue')
- }
-]
-
-export default routes
diff --git a/kyuubi-server/web-ui/src/views/layout/components/aside/types.ts b/kyuubi-server/web-ui/src/views/layout/components/aside/types.ts
index 46ea825a9fd..697ee40cf81 100644
--- a/kyuubi-server/web-ui/src/views/layout/components/aside/types.ts
+++ b/kyuubi-server/web-ui/src/views/layout/components/aside/types.ts
@@ -22,32 +22,24 @@ export const MENUS = [
router: '/overview'
},
{
- label: 'Session Management',
+ label: 'Management',
icon: 'List',
children: [
{
- label: 'Session Statistics',
- router: '/session/session-statistics'
- }
- ]
- },
- {
- label: 'Engine Management',
- icon: 'List',
- children: [
+ label: 'Session',
+ router: '/management/session'
+ },
{
- label: 'Engine Statistics',
- router: '/engine/engine-statistics'
- }
- ]
- },
- {
- label: 'Server Management',
- icon: 'List',
- children: [
+ label: 'Operation',
+ router: '/management/operation'
+ },
{
- label: 'Server Statistics',
- router: '/server/server-statistics'
+ label: 'Engine',
+ router: '/management/engine'
+ },
+ {
+ label: 'Server',
+ router: '/management/server'
}
]
},
@@ -81,11 +73,6 @@ export const MENUS = [
label: 'Operation',
icon: 'List',
children: [
- {
- label: 'Operation Statistics',
- icon: 'VideoPlay',
- router: '/operation/operation-statistics'
- },
{
label: 'Running Jobs',
icon: 'VideoPlay',
diff --git a/kyuubi-server/web-ui/src/views/engine/index.vue b/kyuubi-server/web-ui/src/views/management/engine/index.vue
similarity index 100%
rename from kyuubi-server/web-ui/src/views/engine/index.vue
rename to kyuubi-server/web-ui/src/views/management/engine/index.vue
diff --git a/kyuubi-server/web-ui/src/views/operation/operation-statistics/index.vue b/kyuubi-server/web-ui/src/views/management/operation/index.vue
similarity index 100%
rename from kyuubi-server/web-ui/src/views/operation/operation-statistics/index.vue
rename to kyuubi-server/web-ui/src/views/management/operation/index.vue
diff --git a/kyuubi-server/web-ui/src/views/server/server-statistics/index.vue b/kyuubi-server/web-ui/src/views/management/server/index.vue
similarity index 100%
rename from kyuubi-server/web-ui/src/views/server/server-statistics/index.vue
rename to kyuubi-server/web-ui/src/views/management/server/index.vue
diff --git a/kyuubi-server/web-ui/src/views/session/session-statistics/index.vue b/kyuubi-server/web-ui/src/views/management/session/index.vue
similarity index 100%
rename from kyuubi-server/web-ui/src/views/session/session-statistics/index.vue
rename to kyuubi-server/web-ui/src/views/management/session/index.vue
From 430f6d5901cfb79267db93dffaa3dc3bb598592f Mon Sep 17 00:00:00 2001
From: liangbowen
Date: Fri, 28 Apr 2023 09:59:48 +0800
Subject: [PATCH 086/404] [KYUUBI #4777] Deregister event handlers when
stopping server with event handler made auto-closeable
### _Why are the changes needed?_
- deregister event handlers when stopping KyuubiServer, by deregistering them from the EventBus handler registry
- change `EventHandler` from `type` to `trait` and make it extend `AutoCloseable`
- implement the `close` method in `JsonLoggingEventHandler` to close writers and streams (see the sketch after this list)
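A minimal sketch of the resulting handler shape, simplified to drop the `KyuubiEvent` type bound (the real `JsonLoggingEventHandler` additionally manages per-partition writers and HDFS output streams); the `FileEventHandler` below is hypothetical and only illustrates why `close()` matters:

```scala
import java.nio.file.{Files, Path}

// After the change, a handler is still invoked like a function via apply(),
// but also carries a close() hook so deregistration can release resources.
trait EventHandler[T] extends AutoCloseable {
  def apply(event: T): Unit
  override def close(): Unit = {}
}

// Hypothetical handler owning a writer that must be closed when the server
// stops and the event bus deregisters all of its handlers.
class FileEventHandler(path: Path) extends EventHandler[String] {
  private val writer = Files.newBufferedWriter(path)
  override def apply(event: String): Unit = {
    writer.write(event)
    writer.newLine()
  }
  override def close(): Unit = writer.close()
}
```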
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4777 from bowenliang123/closable-eventlogger.
Closes #4777
db1ad5d73 [liangbowen] make EventBus.deregisterAll method synchronized
648471ba1 [liangbowen] update
d28931d3c [liangbowen] re-register event loggers in ut
7121fa33a [liangbowen] make EventHandler closable, and de-register all event handlers when stopping server
Authored-by: liangbowen
Signed-off-by: liangbowen
---
.../org/apache/kyuubi/events/EventBus.scala | 16 ++++++++++++++++
.../events/handler/JsonLoggingEventHandler.scala | 16 ++++++++++++++--
.../apache/kyuubi/events/handler/package.scala | 6 +++++-
.../org/apache/kyuubi/server/KyuubiServer.scala | 4 +++-
.../ServerJsonLoggingEventHandlerSuite.scala | 2 ++
5 files changed, 40 insertions(+), 4 deletions(-)
diff --git a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventBus.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventBus.scala
index e854e40a769..063f1719ec2 100644
--- a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventBus.scala
+++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventBus.scala
@@ -40,6 +40,8 @@ sealed trait EventBus {
def register[T <: KyuubiEvent: ClassTag](eventHandler: EventHandler[T]): EventBus
def registerAsync[T <: KyuubiEvent: ClassTag](eventHandler: EventHandler[T]): EventBus
+
+ def deregisterAll(): Unit = {}
}
object EventBus extends Logging {
@@ -68,6 +70,10 @@ object EventBus extends Logging {
def registerAsync[T <: KyuubiEvent: ClassTag](et: EventHandler[T]): EventBus =
defaultEventBus.registerAsync[T](et)
+ def deregisterAll(): Unit = synchronized {
+ defaultEventBus.deregisterAll()
+ }
+
private case class EventBusLive() extends EventBus {
private[this] lazy val eventHandlerRegistry = new Registry
private[this] lazy val asyncEventHandlerRegistry = new Registry
@@ -96,6 +102,11 @@ object EventBus extends Logging {
asyncEventHandlerRegistry.register(et)
this
}
+
+ override def deregisterAll(): Unit = {
+ eventHandlerRegistry.deregisterAll()
+ asyncEventHandlerRegistry.deregisterAll()
+ }
}
private class Registry {
@@ -122,5 +133,10 @@ object EventBus extends Logging {
} yield parent
clazz :: parents
}
+
+ def deregisterAll(): Unit = {
+ eventHandlers.values.flatten.foreach(_.close())
+ eventHandlers.clear()
+ }
}
}
diff --git a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/JsonLoggingEventHandler.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/JsonLoggingEventHandler.scala
index f6f74de9a28..77d80b1521c 100644
--- a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/JsonLoggingEventHandler.scala
+++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/JsonLoggingEventHandler.scala
@@ -65,6 +65,17 @@ class JsonLoggingEventHandler(
stream.foreach(_.hflush())
}
+ override def close(): Unit = {
+ writers.values.foreach { case (writer, stream) =>
+ writer.flush()
+ stream.foreach(_.hflush())
+ writer.close()
+ stream.foreach(_.close())
+ }
+ writers.clear()
+ fs = null
+ }
+
private def getOrUpdate(event: KyuubiEvent): Logger = synchronized {
val partitions = event.partitions.map(kv => s"${kv._1}=${kv._2}").mkString(Path.SEPARATOR)
writers.getOrElseUpdate(
@@ -108,6 +119,7 @@ class JsonLoggingEventHandler(
}
object JsonLoggingEventHandler {
- val JSON_LOG_DIR_PERM: FsPermission = new FsPermission(Integer.parseInt("770", 8).toShort)
- val JSON_LOG_FILE_PERM: FsPermission = new FsPermission(Integer.parseInt("660", 8).toShort)
+ private val JSON_LOG_DIR_PERM: FsPermission = new FsPermission(Integer.parseInt("770", 8).toShort)
+ private val JSON_LOG_FILE_PERM: FsPermission =
+ new FsPermission(Integer.parseInt("660", 8).toShort)
}
diff --git a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/package.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/package.scala
index 41cf001ed31..69e1fdcee12 100644
--- a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/package.scala
+++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/package.scala
@@ -18,5 +18,9 @@
package org.apache.kyuubi.events
package object handler {
- type EventHandler[T <: KyuubiEvent] = T => Unit
+ trait EventHandler[T <: KyuubiEvent] extends AutoCloseable {
+ def apply(event: T): Unit
+
+ def close(): Unit = {}
+ }
}
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiServer.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiServer.scala
index a7f2e817837..8bcd8d08430 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiServer.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/KyuubiServer.scala
@@ -193,5 +193,7 @@ class KyuubiServer(name: String) extends Serverable(name) {
ServerEventHandlerRegister.registerEventLoggers(conf)
}
- override protected def stopServer(): Unit = {}
+ override protected def stopServer(): Unit = {
+ EventBus.deregisterAll()
+ }
}
diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerJsonLoggingEventHandlerSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerJsonLoggingEventHandlerSuite.scala
index 3bdc9cd3808..7c79d6a870b 100644
--- a/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerJsonLoggingEventHandlerSuite.scala
+++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerJsonLoggingEventHandlerSuite.scala
@@ -33,6 +33,7 @@ import org.scalatest.time.SpanSugar.convertIntToGrainOfTime
import org.apache.kyuubi._
import org.apache.kyuubi.client.util.BatchUtils._
import org.apache.kyuubi.config.KyuubiConf
+import org.apache.kyuubi.events.ServerEventHandlerRegister
import org.apache.kyuubi.operation.HiveJDBCTestHelper
import org.apache.kyuubi.operation.OperationState._
import org.apache.kyuubi.server.KyuubiServer
@@ -197,6 +198,7 @@ class ServerJsonLoggingEventHandlerSuite extends WithKyuubiServer with HiveJDBCT
server.initialize(conf)
server.start()
server.stop()
+ ServerEventHandlerRegister.registerEventLoggers(conf) // register event loggers again
val hostName = InetAddress.getLocalHost.getCanonicalHostName
val kyuubiServerInfoPath =
From 71d680bef7f38e932ec611cd92a8534aa717a940 Mon Sep 17 00:00:00 2001
From: Paul Lin
Date: Tue, 2 May 2023 16:59:15 +0800
Subject: [PATCH 087/404] [KYUUBI #4742][DOCS] Add docs for Flink application
mode
### _Why are the changes needed?_
As titled.
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4782 from link3280/KYUUBI-4742.
Closes #4742
10d33dc1d [Cheng Pan] Update docs/deployment/engine_on_yarn.md
268f9e008 [Cheng Pan] Update docs/deployment/engine_on_yarn.md
f5f55be8d [Paul Lin] [KYUUBI #4742][docs] Address comments
077b08c9b [Paul Lin] [KYUUBI #4742][docs] Apply spotless
9df25c1b2 [Paul Lin] [KYUUBI #4742][docs] Improve languages
ad367df52 [Paul Lin] [KYUUBI #4742][docs] Improve docs
6c0462493 [Paul Lin] [KYUUBI #4742][docs] Improve languages
865a4b518 [Paul Lin] [KYUUBI #4742][docs] Improve languages
79a4da217 [Paul Lin] [KYUUBI #4742][docs] Update docs
bdc88949a [Paul Lin] [KYUUBI #4742][docs] Apply spotless
134e3a7fa [Paul Lin] [KYUUBI #4742][docs] Reformat table
cde778ef5 [Paul Lin] [KYUUBI #4742][docs] Add docs for Flink application mode
Lead-authored-by: Paul Lin
Co-authored-by: Cheng Pan
Signed-off-by: Cheng Pan
---
docs/deployment/engine_on_yarn.md | 78 ++++++++++++++++++++++++-------
1 file changed, 60 insertions(+), 18 deletions(-)
diff --git a/docs/deployment/engine_on_yarn.md b/docs/deployment/engine_on_yarn.md
index 6812afa46db..66177bb0f6d 100644
--- a/docs/deployment/engine_on_yarn.md
+++ b/docs/deployment/engine_on_yarn.md
@@ -15,13 +15,13 @@
- limitations under the License.
-->
-# Deploy Kyuubi engines on Yarn
+# Deploy Kyuubi engines on YARN
-## Deploy Kyuubi Spark Engine on Yarn
+## Deploy Kyuubi Spark Engine on YARN
### Requirements
-When you want to deploy Kyuubi's Spark SQL engines on YARN, you'd better have cognition upon the following things.
+To deploy Kyuubi's Spark SQL engines on YARN, you should be familiar with the following.
- Knowing the basics about [Running Spark on YARN](https://spark.apache.org/docs/latest/running-on-yarn.html)
- A binary distribution of Spark which is built with YARN support
@@ -113,11 +113,11 @@ so `spark.kerberos.keytab` and `spark.kerberos.principal` should not use now.
Instead, you can schedule a periodically `kinit` process via `crontab` task on the local machine that hosts Kyuubi server or simply use [Kyuubi Kinit](settings.html#kinit).
-## Deploy Kyuubi Flink Engine on Yarn
+## Deploy Kyuubi Flink Engine on YARN
### Requirements
-When you want to deploy Kyuubi's Flink SQL engines on YARN, you'd better have cognition upon the following things.
+To deploy Kyuubi's Flink SQL engines on YARN, you should be familiar with the following.
- Knowing the basics about [Running Flink on YARN](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/resource-providers/yarn)
- A binary distribution of Flink which is built with YARN support
@@ -127,13 +127,59 @@ When you want to deploy Kyuubi's Flink SQL engines on YARN, you'd better have co
- An active Object Storage cluster, e.g. [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html), S3 and [Minio](https://min.io/) etc.
- Setup Hadoop client configurations at the machine the Kyuubi server locates
-### Yarn Session Mode
+### Flink Deployment Modes
+
+Currently, Flink supports two deployment modes on YARN: [YARN Application Mode](https://nightlies.apache.org/flink/flink-docs-release-1.17/docs/deployment/resource-providers/yarn/#application-mode) and [YARN Session Mode](https://nightlies.apache.org/flink/flink-docs-release-1.17/docs/deployment/resource-providers/yarn/#session-mode).
+
+- YARN Application Mode: In this mode, Kyuubi starts a dedicated Flink application cluster and runs the SQL engine on it.
+- YARN Session Mode: In this mode, Kyuubi starts the Flink SQL engine locally and connects to a running Flink YARN session cluster.
+
+As Kyuubi has to know the deployment mode before starting the SQL engine, it's required to specify the deployment mode in the Kyuubi configuration.
+
+```properties
+# candidates: yarn-application, yarn-session
+flink.execution.target=yarn-application
+```
+
+### YARN Application Mode
+
+#### Flink Configurations
+
+Since the Flink SQL engine runs inside the JobManager, it's recommended to tune the resource configurations of the JobManager based on your workload.
+
+The related Flink configurations are listed below (see more details at [Flink Configuration](https://nightlies.apache.org/flink/flink-docs-master/docs/deployment/config/#yarn)):
+
+| Name | Default | Meaning |
+|--------------------------------|---------|----------------------------------------------------------------------------------------|
+| yarn.appmaster.vcores | 1 | The number of virtual cores (vcores) used by the JobManager (YARN application master). |
+| jobmanager.memory.process.size | (none) | Total size of the memory of the JobManager process. |
+
+#### Environment
+
+Ensure that either `HADOOP_CONF_DIR` or `YARN_CONF_DIR` is configured and points to the Hadoop client configuration directory, usually `$HADOOP_HOME/etc/hadoop`.
+
+You can verify your setup with the following commands:
+
+```bash
+# we assume to be in the root directory of
+# the unzipped Flink distribution
+
+# (0) export HADOOP_CLASSPATH
+export HADOOP_CLASSPATH=`hadoop classpath`
+
+# (1) submit a Flink job and ensure it runs successfully
+./bin/flink run -m yarn-cluster ./examples/streaming/WordCount.jar
+```
+
+### YARN Session Mode
#### Flink Configurations
```bash
execution.target: yarn-session
-# Yarn Session Cluster application id.
+# YARN Session Cluster application id.
yarn.application.id: application_00000000XX_00XX
```
@@ -194,23 +240,19 @@ To use Hadoop vanilla jars, please configure $KYUUBI_HOME/conf/kyuubi-env.sh as
$ echo "export FLINK_HADOOP_CLASSPATH=`hadoop classpath`" >> $KYUUBI_HOME/conf/kyuubi-env.sh
```
-### Deployment Modes Supported by Flink on YARN
-
-For experiment use, we recommend deploying Kyuubi Flink SQL engine in [Session Mode](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/resource-providers/yarn/#session-mode).
-At present, [Application Mode](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/resource-providers/yarn/#application-mode) and [Per-Job Mode (deprecated)](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/resource-providers/yarn/#per-job-mode-deprecated) are not supported for Flink engine.
-
### Kerberos
-As Kyuubi Flink SQL engine wraps the Flink SQL client that currently does not support [Flink Kerberos Configuration](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/config/#security-kerberos-login-keytab),
-so `security.kerberos.login.keytab` and `security.kerberos.login.principal` should not use now.
+With regard to YARN application mode, Kerberos is supported natively by Flink; see [Flink Kerberos Configuration](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/config/#security-kerberos-login-keytab) for details.
-Instead, you can schedule a periodically `kinit` process via `crontab` task on the local machine that hosts Kyuubi server or simply use [Kyuubi Kinit](settings.html#kinit).
+With regard to YARN session mode, `security.kerberos.login.keytab` and `security.kerberos.login.principal` are not effective, because the Kyuubi Flink SQL engine mainly relies on the Flink SQL client, which currently does not support [Flink Kerberos Configuration](https://nightlies.apache.org/flink/flink-docs-stable/docs/deployment/config/#security-kerberos-login-keytab).
+
+As a workaround, you can schedule a periodic `kinit` process via a `crontab` task on the local machine that hosts the Kyuubi server, or simply use [Kyuubi Kinit](settings.html#kinit).
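+
+For example, a crontab entry along these lines (principal, keytab path, and interval are illustrative):
+
+```bash
+# renew the Kerberos TGT every 6 hours for the user running the Kyuubi server
+0 */6 * * * kinit -kt /path/to/kyuubi.keytab kyuubi/host.example.com@EXAMPLE.COM
+```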
-## Deploy Kyuubi Hive Engine on Yarn
+## Deploy Kyuubi Hive Engine on YARN
### Requirements
-When you want to deploy Kyuubi's Hive SQL engines on YARN, you'd better have cognition upon the following things.
+To deploy Kyuubi's Hive SQL engines on YARN, you should be familiar with the following:
- Knowing the basics about [Running Hive on YARN](https://cwiki.apache.org/confluence/display/Hive/GettingStarted)
- A binary distribution of Hive
@@ -239,7 +281,7 @@ $ $HIVE_HOME/bin/beeline -u 'jdbc:hive2://localhost:10000/default'
0: jdbc:hive2://localhost:10000/default> INSERT INTO TABLE pokes VALUES (1, 'hello');
```
-If the `Hive SQL` passes and there is a job in Yarn Web UI, It indicates the hive environment is normal.
+If the `Hive SQL` statements pass and a job appears in the YARN Web UI, it indicates the Hive environment is working properly.
#### Required Environment Variable
From e4fe4814ec6d8f888997deb7f46fe3d608fa607d Mon Sep 17 00:00:00 2001
From: liangbowen
Date: Wed, 3 May 2023 18:39:22 +0800
Subject: [PATCH 088/404] [KYUUBI #4783] Use pnpm v8 and update pnpm lockfile
to v6 schema
### _Why are the changes needed?_
- v8 is now the stable version of pnpm
- lockfile v6 is used by default since pnpm v8.0, which improves the readability of the lockfile by removing hashes from package IDs and makes it more resistant to git merge conflicts (https://github.com/pnpm/pnpm/releases/tag/v8.0.0)
- pnpm 8 supports Node.js 16 and 18 (https://pnpm.io/installation#compatibility)
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4783 from bowenliang123/pnpm8.
Closes #4783
8b3db3820 [liangbowen] use pnpm v8 and update lock file to v6
Authored-by: liangbowen
Signed-off-by: Cheng Pan
---
.github/workflows/style.yml | 2 +-
.github/workflows/web-ui.yml | 2 +-
kyuubi-server/web-ui/README.md | 2 +-
kyuubi-server/web-ui/pnpm-lock.yaml | 926 ++++++++++++++--------------
pom.xml | 2 +-
5 files changed, 472 insertions(+), 462 deletions(-)
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index b045de39705..e2e6fd70e0b 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -93,7 +93,7 @@ jobs:
- name: Web UI Style with node
run: |
cd ./kyuubi-server/web-ui
- npm install pnpm@7 -g
+ npm install pnpm@8 -g
pnpm install
pnpm run lint
echo "---------------------------------------Notice------------------------------------"
diff --git a/.github/workflows/web-ui.yml b/.github/workflows/web-ui.yml
index 2a48eeaa1ea..65fb4759453 100644
--- a/.github/workflows/web-ui.yml
+++ b/.github/workflows/web-ui.yml
@@ -28,7 +28,7 @@ jobs:
- name: npm run coverage & build
run: |
cd ./kyuubi-server/web-ui
- npm install pnpm@7 -g
+ npm install pnpm@8 -g
pnpm install
pnpm run coverage
pnpm run build
diff --git a/kyuubi-server/web-ui/README.md b/kyuubi-server/web-ui/README.md
index b892a690261..abac83e9f05 100644
--- a/kyuubi-server/web-ui/README.md
+++ b/kyuubi-server/web-ui/README.md
@@ -45,7 +45,7 @@ npm run prettier
### Recommend
-If you want to save disk space and boost installation speed, we recommend using `pnpm 7.x.x` to instead of npm.
+If you want to save disk space and boost installation speed, we recommend using `pnpm 8.x.x` instead of npm.
You can learn how to install the corresponding version from its official website.
- [pnpm](https://pnpm.io/)
diff --git a/kyuubi-server/web-ui/pnpm-lock.yaml b/kyuubi-server/web-ui/pnpm-lock.yaml
index 1926352abe6..6776a3fb5ae 100644
--- a/kyuubi-server/web-ui/pnpm-lock.yaml
+++ b/kyuubi-server/web-ui/pnpm-lock.yaml
@@ -1,85 +1,111 @@
-lockfileVersion: 5.4
-
-specifiers:
- '@element-plus/icons-vue': ^2.0.9
- '@iconify-json/ep': ^1.1.6
- '@types/node': ^18.7.1
- '@typescript-eslint/eslint-plugin': ^5.33.0
- '@typescript-eslint/parser': ^5.33.0
- '@vitejs/plugin-vue': ^3.0.0
- '@vitest/coverage-c8': ^0.22.0
- '@vue/eslint-config-prettier': ^7.0.0
- '@vue/eslint-config-typescript': ^11.0.0
- '@vue/test-utils': ^2.0.2
- axios: ^0.27.2
- date-fns: ^2.29.3
- element-plus: ^2.2.12
- eslint: ^8.21.0
- eslint-plugin-prettier: ^4.2.1
- eslint-plugin-vue: ^9.3.0
- jsdom: ^20.0.0
- pinia: ^2.0.18
- pinia-plugin-persistedstate: ^2.1.1
- prettier: ^2.7.1
- sass: ^1.54.4
- typescript: ^4.6.4
- vite: ^3.0.0
- vitest: ^0.22.0
- vue: ^3.2.37
- vue-i18n: ^9.2.2
- vue-router: ^4.1.3
- vue-tsc: ^0.38.4
+lockfileVersion: '6.0'
dependencies:
- '@element-plus/icons-vue': 2.0.9_vue@3.2.37
- axios: 0.27.2
- date-fns: 2.29.3
- element-plus: 2.2.13_vue@3.2.37
- pinia: 2.0.18_j6bzmzd4ujpabbp5objtwxyjp4
- pinia-plugin-persistedstate: 2.1.1_pinia@2.0.18
- vue: 3.2.37
- vue-i18n: 9.2.2_vue@3.2.37
- vue-router: 4.1.3_vue@3.2.37
+ '@element-plus/icons-vue':
+ specifier: ^2.0.9
+ version: 2.0.9(vue@3.2.37)
+ axios:
+ specifier: ^0.27.2
+ version: 0.27.2
+ date-fns:
+ specifier: ^2.29.3
+ version: 2.29.3
+ element-plus:
+ specifier: ^2.2.12
+ version: 2.2.13(vue@3.2.37)
+ pinia:
+ specifier: ^2.0.18
+ version: 2.0.18(typescript@4.7.4)(vue@3.2.37)
+ pinia-plugin-persistedstate:
+ specifier: ^2.1.1
+ version: 2.1.1(pinia@2.0.18)
+ vue:
+ specifier: ^3.2.37
+ version: 3.2.37
+ vue-i18n:
+ specifier: ^9.2.2
+ version: 9.2.2(vue@3.2.37)
+ vue-router:
+ specifier: ^4.1.3
+ version: 4.1.3(vue@3.2.37)
devDependencies:
- '@iconify-json/ep': 1.1.7
- '@types/node': 18.7.6
- '@typescript-eslint/eslint-plugin': 5.33.1_vsoshirnpb7xw6mr7xomgfas2i
- '@typescript-eslint/parser': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq
- '@vitejs/plugin-vue': 3.0.3_vite@3.0.8+vue@3.2.37
- '@vitest/coverage-c8': 0.22.0_jsdom@20.0.0+sass@1.54.4
- '@vue/eslint-config-prettier': 7.0.0_2xd4q2tc5cqa5as7uugqhp6oue
- '@vue/eslint-config-typescript': 11.0.0_4py5zxx5ck6utobkmozwvrmyiy
- '@vue/test-utils': 2.0.2_vue@3.2.37
- eslint: 8.22.0
- eslint-plugin-prettier: 4.2.1_2xd4q2tc5cqa5as7uugqhp6oue
- eslint-plugin-vue: 9.3.0_eslint@8.22.0
- jsdom: 20.0.0
- prettier: 2.7.1
- sass: 1.54.4
- typescript: 4.7.4
- vite: 3.0.8_sass@1.54.4
- vitest: 0.22.0_jsdom@20.0.0+sass@1.54.4
- vue-tsc: 0.38.9_typescript@4.7.4
+ '@iconify-json/ep':
+ specifier: ^1.1.6
+ version: 1.1.7
+ '@types/node':
+ specifier: ^18.7.1
+ version: 18.7.6
+ '@typescript-eslint/eslint-plugin':
+ specifier: ^5.33.0
+ version: 5.33.1(@typescript-eslint/parser@5.33.1)(eslint@8.22.0)(typescript@4.7.4)
+ '@typescript-eslint/parser':
+ specifier: ^5.33.0
+ version: 5.33.1(eslint@8.22.0)(typescript@4.7.4)
+ '@vitejs/plugin-vue':
+ specifier: ^3.0.0
+ version: 3.0.3(vite@3.0.8)(vue@3.2.37)
+ '@vitest/coverage-c8':
+ specifier: ^0.22.0
+ version: 0.22.0(jsdom@20.0.0)(sass@1.54.4)
+ '@vue/eslint-config-prettier':
+ specifier: ^7.0.0
+ version: 7.0.0(eslint@8.22.0)(prettier@2.7.1)
+ '@vue/eslint-config-typescript':
+ specifier: ^11.0.0
+ version: 11.0.0(eslint-plugin-vue@9.3.0)(eslint@8.22.0)(typescript@4.7.4)
+ '@vue/test-utils':
+ specifier: ^2.0.2
+ version: 2.0.2(vue@3.2.37)
+ eslint:
+ specifier: ^8.21.0
+ version: 8.22.0
+ eslint-plugin-prettier:
+ specifier: ^4.2.1
+ version: 4.2.1(eslint-config-prettier@8.5.0)(eslint@8.22.0)(prettier@2.7.1)
+ eslint-plugin-vue:
+ specifier: ^9.3.0
+ version: 9.3.0(eslint@8.22.0)
+ jsdom:
+ specifier: ^20.0.0
+ version: 20.0.0
+ prettier:
+ specifier: ^2.7.1
+ version: 2.7.1
+ sass:
+ specifier: ^1.54.4
+ version: 1.54.4
+ typescript:
+ specifier: ^4.6.4
+ version: 4.7.4
+ vite:
+ specifier: ^3.0.0
+ version: 3.0.8(sass@1.54.4)
+ vitest:
+ specifier: ^0.22.0
+ version: 0.22.0(jsdom@20.0.0)(sass@1.54.4)
+ vue-tsc:
+ specifier: ^0.38.4
+ version: 0.38.9(typescript@4.7.4)
packages:
- /@babel/helper-string-parser/7.18.10:
+ /@babel/helper-string-parser@7.18.10:
resolution: {integrity: sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==}
engines: {node: '>=6.9.0'}
- /@babel/helper-validator-identifier/7.18.6:
+ /@babel/helper-validator-identifier@7.18.6:
resolution: {integrity: sha512-MmetCkz9ej86nJQV+sFCxoGGrUbU3q02kgLciwkrt9QqEB7cP39oKEY0PakknEO0Gu20SskMRi+AYZ3b1TpN9g==}
engines: {node: '>=6.9.0'}
- /@babel/parser/7.18.11:
+ /@babel/parser@7.18.11:
resolution: {integrity: sha512-9JKn5vN+hDt0Hdqn1PiJ2guflwP+B6Ga8qbDuoF0PzzVhrzsKIJo8yGqVk6CmMHiMei9w1C1Bp9IMJSIK+HPIQ==}
engines: {node: '>=6.0.0'}
hasBin: true
dependencies:
'@babel/types': 7.18.10
- /@babel/types/7.18.10:
+ /@babel/types@7.18.10:
resolution: {integrity: sha512-MJvnbEiiNkpjo+LknnmRrqbY1GPUUggjv+wQVjetM/AONoupqRALB7I6jGqNUAZsKcRIEu2J6FRFvsczljjsaQ==}
engines: {node: '>=6.9.0'}
dependencies:
@@ -87,16 +113,16 @@ packages:
'@babel/helper-validator-identifier': 7.18.6
to-fast-properties: 2.0.0
- /@bcoe/v8-coverage/0.2.3:
+ /@bcoe/v8-coverage@0.2.3:
resolution: {integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==}
dev: true
- /@ctrl/tinycolor/3.4.1:
+ /@ctrl/tinycolor@3.4.1:
resolution: {integrity: sha512-ej5oVy6lykXsvieQtqZxCOaLT+xD4+QNarq78cIYISHmZXshCvROLudpQN3lfL8G0NL7plMSSK+zlyvCaIJ4Iw==}
engines: {node: '>=10'}
dev: false
- /@element-plus/icons-vue/2.0.9_vue@3.2.37:
+ /@element-plus/icons-vue@2.0.9(vue@3.2.37):
resolution: {integrity: sha512-okdrwiVeKBmW41Hkl0eMrXDjzJwhQMuKiBOu17rOszqM+LS/yBYpNQNV5Jvoh06Wc+89fMmb/uhzf8NZuDuUaQ==}
peerDependencies:
vue: ^3.2.0
@@ -104,7 +130,7 @@ packages:
vue: 3.2.37
dev: false
- /@esbuild/linux-loong64/0.14.54:
+ /@esbuild/linux-loong64@0.14.54:
resolution: {integrity: sha512-bZBrLAIX1kpWelV0XemxBZllyRmM6vgFQQG2GdNb+r3Fkp0FOh1NJSvekXDs7jq70k4euu1cryLMfU+mTXlEpw==}
engines: {node: '>=12'}
cpu: [loong64]
@@ -113,7 +139,7 @@ packages:
dev: true
optional: true
- /@eslint/eslintrc/1.3.0:
+ /@eslint/eslintrc@1.3.0:
resolution: {integrity: sha512-UWW0TMTmk2d7hLcWD1/e2g5HDM/HQ3csaLSqXCfqwh4uNDuNqlaKWXmEsL4Cs41Z0KnILNvwbHAah3C2yt06kw==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
dependencies:
@@ -130,17 +156,17 @@ packages:
- supports-color
dev: true
- /@floating-ui/core/0.7.3:
+ /@floating-ui/core@0.7.3:
resolution: {integrity: sha512-buc8BXHmG9l82+OQXOFU3Kr2XQx9ys01U/Q9HMIrZ300iLc8HLMgh7dcCqgYzAzf4BkoQvDcXf5Y+CuEZ5JBYg==}
dev: false
- /@floating-ui/dom/0.5.4:
+ /@floating-ui/dom@0.5.4:
resolution: {integrity: sha512-419BMceRLq0RrmTSDxn8hf9R3VCJv2K9PUfugh5JyEFmdjzDo+e8U5EdR8nzKq8Yj1htzLm3b6eQEEam3/rrtg==}
dependencies:
'@floating-ui/core': 0.7.3
dev: false
- /@humanwhocodes/config-array/0.10.4:
+ /@humanwhocodes/config-array@0.10.4:
resolution: {integrity: sha512-mXAIHxZT3Vcpg83opl1wGlVZ9xydbfZO3r5YfRSH6Gpp2J/PfdBP0wbDa2sO6/qRbcalpoevVyW6A/fI6LfeMw==}
engines: {node: '>=10.10.0'}
dependencies:
@@ -151,25 +177,25 @@ packages:
- supports-color
dev: true
- /@humanwhocodes/gitignore-to-minimatch/1.0.2:
+ /@humanwhocodes/gitignore-to-minimatch@1.0.2:
resolution: {integrity: sha512-rSqmMJDdLFUsyxR6FMtD00nfQKKLFb1kv+qBbOVKqErvloEIJLo5bDTJTQNTYgeyp78JsA7u/NPi5jT1GR/MuA==}
dev: true
- /@humanwhocodes/object-schema/1.2.1:
+ /@humanwhocodes/object-schema@1.2.1:
resolution: {integrity: sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==}
dev: true
- /@iconify-json/ep/1.1.7:
+ /@iconify-json/ep@1.1.7:
resolution: {integrity: sha512-GhXWVKalXFlrGgfrCXAgqBre5hv3pPAknuxyywmjamcrL5gl5Mq9WOZtuhb4cB6cJ5pMiKOMtegt73FheqWscA==}
dependencies:
'@iconify/types': 1.1.0
dev: true
- /@iconify/types/1.1.0:
+ /@iconify/types@1.1.0:
resolution: {integrity: sha512-Jh0llaK2LRXQoYsorIH8maClebsnzTcve+7U3rQUSnC11X4jtPnFuyatqFLvMxZ8MLG8dB4zfHsbPfuvxluONw==}
dev: true
- /@intlify/core-base/9.2.2:
+ /@intlify/core-base@9.2.2:
resolution: {integrity: sha512-JjUpQtNfn+joMbrXvpR4hTF8iJQ2sEFzzK3KIESOx+f+uwIjgw20igOyaIdhfsVVBCds8ZM64MoeNSx+PHQMkA==}
engines: {node: '>= 14'}
dependencies:
@@ -179,14 +205,14 @@ packages:
'@intlify/vue-devtools': 9.2.2
dev: false
- /@intlify/devtools-if/9.2.2:
+ /@intlify/devtools-if@9.2.2:
resolution: {integrity: sha512-4ttr/FNO29w+kBbU7HZ/U0Lzuh2cRDhP8UlWOtV9ERcjHzuyXVZmjyleESK6eVP60tGC9QtQW9yZE+JeRhDHkg==}
engines: {node: '>= 14'}
dependencies:
'@intlify/shared': 9.2.2
dev: false
- /@intlify/message-compiler/9.2.2:
+ /@intlify/message-compiler@9.2.2:
resolution: {integrity: sha512-IUrQW7byAKN2fMBe8z6sK6riG1pue95e5jfokn8hA5Q3Bqy4MBJ5lJAofUsawQJYHeoPJ7svMDyBaVJ4d0GTtA==}
engines: {node: '>= 14'}
dependencies:
@@ -194,12 +220,12 @@ packages:
source-map: 0.6.1
dev: false
- /@intlify/shared/9.2.2:
+ /@intlify/shared@9.2.2:
resolution: {integrity: sha512-wRwTpsslgZS5HNyM7uDQYZtxnbI12aGiBZURX3BTR9RFIKKRWpllTsgzHWvj3HKm3Y2Sh5LPC1r0PDCKEhVn9Q==}
engines: {node: '>= 14'}
dev: false
- /@intlify/vue-devtools/9.2.2:
+ /@intlify/vue-devtools@9.2.2:
resolution: {integrity: sha512-+dUyqyCHWHb/UcvY1MlIpO87munedm3Gn6E9WWYdWrMuYLcoIoOEVDWSS8xSwtlPU+kA+MEQTP6Q1iI/ocusJg==}
engines: {node: '>= 14'}
dependencies:
@@ -207,28 +233,28 @@ packages:
'@intlify/shared': 9.2.2
dev: false
- /@istanbuljs/schema/0.1.3:
+ /@istanbuljs/schema@0.1.3:
resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==}
engines: {node: '>=8'}
dev: true
- /@jridgewell/resolve-uri/3.1.0:
+ /@jridgewell/resolve-uri@3.1.0:
resolution: {integrity: sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==}
engines: {node: '>=6.0.0'}
dev: true
- /@jridgewell/sourcemap-codec/1.4.14:
+ /@jridgewell/sourcemap-codec@1.4.14:
resolution: {integrity: sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==}
dev: true
- /@jridgewell/trace-mapping/0.3.15:
+ /@jridgewell/trace-mapping@0.3.15:
resolution: {integrity: sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==}
dependencies:
'@jridgewell/resolve-uri': 3.1.0
'@jridgewell/sourcemap-codec': 1.4.14
dev: true
- /@nodelib/fs.scandir/2.1.5:
+ /@nodelib/fs.scandir@2.1.5:
resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==}
engines: {node: '>= 8'}
dependencies:
@@ -236,12 +262,12 @@ packages:
run-parallel: 1.2.0
dev: true
- /@nodelib/fs.stat/2.0.5:
+ /@nodelib/fs.stat@2.0.5:
resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==}
engines: {node: '>= 8'}
dev: true
- /@nodelib/fs.walk/1.2.8:
+ /@nodelib/fs.walk@1.2.8:
resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==}
engines: {node: '>= 8'}
dependencies:
@@ -249,52 +275,52 @@ packages:
fastq: 1.13.0
dev: true
- /@sxzz/popperjs-es/2.11.7:
+ /@sxzz/popperjs-es@2.11.7:
resolution: {integrity: sha512-Ccy0NlLkzr0Ex2FKvh2X+OyERHXJ88XJ1MXtsI9y9fGexlaXaVTPzBCRBwIxFkORuOb+uBqeu+RqnpgYTEZRUQ==}
dev: false
- /@tootallnate/once/2.0.0:
+ /@tootallnate/once@2.0.0:
resolution: {integrity: sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==}
engines: {node: '>= 10'}
dev: true
- /@types/chai-subset/1.3.3:
+ /@types/chai-subset@1.3.3:
resolution: {integrity: sha512-frBecisrNGz+F4T6bcc+NLeolfiojh5FxW2klu669+8BARtyQv2C/GkNW6FUodVe4BroGMP/wER/YDGc7rEllw==}
dependencies:
'@types/chai': 4.3.3
dev: true
- /@types/chai/4.3.3:
+ /@types/chai@4.3.3:
resolution: {integrity: sha512-hC7OMnszpxhZPduX+m+nrx+uFoLkWOMiR4oa/AZF3MuSETYTZmFfJAHqZEM8MVlvfG7BEUcgvtwoCTxBp6hm3g==}
dev: true
- /@types/istanbul-lib-coverage/2.0.4:
+ /@types/istanbul-lib-coverage@2.0.4:
resolution: {integrity: sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==}
dev: true
- /@types/json-schema/7.0.11:
+ /@types/json-schema@7.0.11:
resolution: {integrity: sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==}
dev: true
- /@types/lodash-es/4.17.6:
+ /@types/lodash-es@4.17.6:
resolution: {integrity: sha512-R+zTeVUKDdfoRxpAryaQNRKk3105Rrgx2CFRClIgRGaqDTdjsm8h6IYA8ir584W3ePzkZfst5xIgDwYrlh9HLg==}
dependencies:
'@types/lodash': 4.14.183
dev: false
- /@types/lodash/4.14.183:
+ /@types/lodash@4.14.183:
resolution: {integrity: sha512-UXavyuxzXKMqJPEpFPri6Ku5F9af6ZJXUneHhvQJxavrEjuHkFp2YnDWHcxJiG7hk8ZkWqjcyNeW1s/smZv5cw==}
dev: false
- /@types/node/18.7.6:
+ /@types/node@18.7.6:
resolution: {integrity: sha512-EdxgKRXgYsNITy5mjjXjVE/CS8YENSdhiagGrLqjG0pvA2owgJ6i4l7wy/PFZGC0B1/H20lWKN7ONVDNYDZm7A==}
dev: true
- /@types/web-bluetooth/0.0.14:
+ /@types/web-bluetooth@0.0.14:
resolution: {integrity: sha512-5d2RhCard1nQUC3aHcq/gHzWYO6K0WJmAbjO7mQJgCQKtZpgXxv1rOM6O/dBDhDYYVutk1sciOgNSe+5YyfM8A==}
dev: false
- /@typescript-eslint/eslint-plugin/5.33.1_vsoshirnpb7xw6mr7xomgfas2i:
+ /@typescript-eslint/eslint-plugin@5.33.1(@typescript-eslint/parser@5.33.1)(eslint@8.22.0)(typescript@4.7.4):
resolution: {integrity: sha512-S1iZIxrTvKkU3+m63YUOxYPKaP+yWDQrdhxTglVDVEVBf+aCSw85+BmJnyUaQQsk5TXFG/LpBu9fa+LrAQ91fQ==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
peerDependencies:
@@ -305,23 +331,23 @@ packages:
typescript:
optional: true
dependencies:
- '@typescript-eslint/parser': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq
+ '@typescript-eslint/parser': 5.33.1(eslint@8.22.0)(typescript@4.7.4)
'@typescript-eslint/scope-manager': 5.33.1
- '@typescript-eslint/type-utils': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq
- '@typescript-eslint/utils': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq
+ '@typescript-eslint/type-utils': 5.33.1(eslint@8.22.0)(typescript@4.7.4)
+ '@typescript-eslint/utils': 5.33.1(eslint@8.22.0)(typescript@4.7.4)
debug: 4.3.4
eslint: 8.22.0
functional-red-black-tree: 1.0.1
ignore: 5.2.0
regexpp: 3.2.0
semver: 7.3.7
- tsutils: 3.21.0_typescript@4.7.4
+ tsutils: 3.21.0(typescript@4.7.4)
typescript: 4.7.4
transitivePeerDependencies:
- supports-color
dev: true
- /@typescript-eslint/parser/5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq:
+ /@typescript-eslint/parser@5.33.1(eslint@8.22.0)(typescript@4.7.4):
resolution: {integrity: sha512-IgLLtW7FOzoDlmaMoXdxG8HOCByTBXrB1V2ZQYSEV1ggMmJfAkMWTwUjjzagS6OkfpySyhKFkBw7A9jYmcHpZA==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
peerDependencies:
@@ -333,7 +359,7 @@ packages:
dependencies:
'@typescript-eslint/scope-manager': 5.33.1
'@typescript-eslint/types': 5.33.1
- '@typescript-eslint/typescript-estree': 5.33.1_typescript@4.7.4
+ '@typescript-eslint/typescript-estree': 5.33.1(typescript@4.7.4)
debug: 4.3.4
eslint: 8.22.0
typescript: 4.7.4
@@ -341,7 +367,7 @@ packages:
- supports-color
dev: true
- /@typescript-eslint/scope-manager/5.33.1:
+ /@typescript-eslint/scope-manager@5.33.1:
resolution: {integrity: sha512-8ibcZSqy4c5m69QpzJn8XQq9NnqAToC8OdH/W6IXPXv83vRyEDPYLdjAlUx8h/rbusq6MkW4YdQzURGOqsn3CA==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
dependencies:
@@ -349,7 +375,7 @@ packages:
'@typescript-eslint/visitor-keys': 5.33.1
dev: true
- /@typescript-eslint/type-utils/5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq:
+ /@typescript-eslint/type-utils@5.33.1(eslint@8.22.0)(typescript@4.7.4):
resolution: {integrity: sha512-X3pGsJsD8OiqhNa5fim41YtlnyiWMF/eKsEZGsHID2HcDqeSC5yr/uLOeph8rNF2/utwuI0IQoAK3fpoxcLl2g==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
peerDependencies:
@@ -359,21 +385,21 @@ packages:
typescript:
optional: true
dependencies:
- '@typescript-eslint/utils': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq
+ '@typescript-eslint/utils': 5.33.1(eslint@8.22.0)(typescript@4.7.4)
debug: 4.3.4
eslint: 8.22.0
- tsutils: 3.21.0_typescript@4.7.4
+ tsutils: 3.21.0(typescript@4.7.4)
typescript: 4.7.4
transitivePeerDependencies:
- supports-color
dev: true
- /@typescript-eslint/types/5.33.1:
+ /@typescript-eslint/types@5.33.1:
resolution: {integrity: sha512-7K6MoQPQh6WVEkMrMW5QOA5FO+BOwzHSNd0j3+BlBwd6vtzfZceJ8xJ7Um2XDi/O3umS8/qDX6jdy2i7CijkwQ==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
dev: true
- /@typescript-eslint/typescript-estree/5.33.1_typescript@4.7.4:
+ /@typescript-eslint/typescript-estree@5.33.1(typescript@4.7.4):
resolution: {integrity: sha512-JOAzJ4pJ+tHzA2pgsWQi4804XisPHOtbvwUyqsuuq8+y5B5GMZs7lI1xDWs6V2d7gE/Ez5bTGojSK12+IIPtXA==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
peerDependencies:
@@ -388,13 +414,13 @@ packages:
globby: 11.1.0
is-glob: 4.0.3
semver: 7.3.7
- tsutils: 3.21.0_typescript@4.7.4
+ tsutils: 3.21.0(typescript@4.7.4)
typescript: 4.7.4
transitivePeerDependencies:
- supports-color
dev: true
- /@typescript-eslint/utils/5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq:
+ /@typescript-eslint/utils@5.33.1(eslint@8.22.0)(typescript@4.7.4):
resolution: {integrity: sha512-uphZjkMaZ4fE8CR4dU7BquOV6u0doeQAr8n6cQenl/poMaIyJtBu8eys5uk6u5HiDH01Mj5lzbJ5SfeDz7oqMQ==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
peerDependencies:
@@ -403,16 +429,16 @@ packages:
'@types/json-schema': 7.0.11
'@typescript-eslint/scope-manager': 5.33.1
'@typescript-eslint/types': 5.33.1
- '@typescript-eslint/typescript-estree': 5.33.1_typescript@4.7.4
+ '@typescript-eslint/typescript-estree': 5.33.1(typescript@4.7.4)
eslint: 8.22.0
eslint-scope: 5.1.1
- eslint-utils: 3.0.0_eslint@8.22.0
+ eslint-utils: 3.0.0(eslint@8.22.0)
transitivePeerDependencies:
- supports-color
- typescript
dev: true
- /@typescript-eslint/visitor-keys/5.33.1:
+ /@typescript-eslint/visitor-keys@5.33.1:
resolution: {integrity: sha512-nwIxOK8Z2MPWltLKMLOEZwmfBZReqUdbEoHQXeCpa+sRVARe5twpJGHCB4dk9903Yaf0nMAlGbQfaAH92F60eg==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
dependencies:
@@ -420,22 +446,22 @@ packages:
eslint-visitor-keys: 3.3.0
dev: true
- /@vitejs/plugin-vue/3.0.3_vite@3.0.8+vue@3.2.37:
+ /@vitejs/plugin-vue@3.0.3(vite@3.0.8)(vue@3.2.37):
resolution: {integrity: sha512-U4zNBlz9mg+TA+i+5QPc3N5lQvdUXENZLO2h0Wdzp56gI1MWhqJOv+6R+d4kOzoaSSq6TnGPBdZAXKOe4lXy6g==}
engines: {node: ^14.18.0 || >=16.0.0}
peerDependencies:
vite: ^3.0.0
vue: ^3.2.25
dependencies:
- vite: 3.0.8_sass@1.54.4
+ vite: 3.0.8(sass@1.54.4)
vue: 3.2.37
dev: true
- /@vitest/coverage-c8/0.22.0_jsdom@20.0.0+sass@1.54.4:
+ /@vitest/coverage-c8@0.22.0(jsdom@20.0.0)(sass@1.54.4):
resolution: {integrity: sha512-jwW6b8U+h9nbzQfKoRmpf2xjDg+mcAjLIdVUrZGhjTnIdekGfvoqFoeiXzsLv2HwYBeFi4943lYUftuj8qD1FQ==}
dependencies:
c8: 7.12.0
- vitest: 0.22.0_jsdom@20.0.0+sass@1.54.4
+ vitest: 0.22.0(jsdom@20.0.0)(sass@1.54.4)
transitivePeerDependencies:
- '@edge-runtime/vm'
- '@vitest/browser'
@@ -449,17 +475,17 @@ packages:
- terser
dev: true
- /@volar/code-gen/0.38.9:
+ /@volar/code-gen@0.38.9:
resolution: {integrity: sha512-n6LClucfA+37rQeskvh9vDoZV1VvCVNy++MAPKj2dT4FT+Fbmty/SDQqnsEBtdEe6E3OQctFvA/IcKsx3Mns0A==}
dependencies:
'@volar/source-map': 0.38.9
dev: true
- /@volar/source-map/0.38.9:
+ /@volar/source-map@0.38.9:
resolution: {integrity: sha512-ba0UFoHDYry+vwKdgkWJ6xlQT+8TFtZg1zj9tSjj4PykW1JZDuM0xplMotLun4h3YOoYfY9K1huY5gvxmrNLIw==}
dev: true
- /@volar/vue-code-gen/0.38.9:
+ /@volar/vue-code-gen@0.38.9:
resolution: {integrity: sha512-tzj7AoarFBKl7e41MR006ncrEmNPHALuk8aG4WdDIaG387X5//5KhWC5Ff3ZfB2InGSeNT+CVUd74M0gS20rjA==}
dependencies:
'@volar/code-gen': 0.38.9
@@ -469,7 +495,7 @@ packages:
'@vue/shared': 3.2.37
dev: true
- /@volar/vue-typescript/0.38.9:
+ /@volar/vue-typescript@0.38.9:
resolution: {integrity: sha512-iJMQGU91ADi98u8V1vXd2UBmELDAaeSP0ZJaFjwosClQdKlJQYc6MlxxKfXBZisHqfbhdtrGRyaryulnYtliZw==}
dependencies:
'@volar/code-gen': 0.38.9
@@ -479,7 +505,7 @@ packages:
'@vue/reactivity': 3.2.37
dev: true
- /@vue/compiler-core/3.2.37:
+ /@vue/compiler-core@3.2.37:
resolution: {integrity: sha512-81KhEjo7YAOh0vQJoSmAD68wLfYqJvoiD4ulyedzF+OEk/bk6/hx3fTNVfuzugIIaTrOx4PGx6pAiBRe5e9Zmg==}
dependencies:
'@babel/parser': 7.18.11
@@ -487,13 +513,13 @@ packages:
estree-walker: 2.0.2
source-map: 0.6.1
- /@vue/compiler-dom/3.2.37:
+ /@vue/compiler-dom@3.2.37:
resolution: {integrity: sha512-yxJLH167fucHKxaqXpYk7x8z7mMEnXOw3G2q62FTkmsvNxu4FQSu5+3UMb+L7fjKa26DEzhrmCxAgFLLIzVfqQ==}
dependencies:
'@vue/compiler-core': 3.2.37
'@vue/shared': 3.2.37
- /@vue/compiler-sfc/3.2.37:
+ /@vue/compiler-sfc@3.2.37:
resolution: {integrity: sha512-+7i/2+9LYlpqDv+KTtWhOZH+pa8/HnX/905MdVmAcI/mPQOBwkHHIzrsEsucyOIZQYMkXUiTkmZq5am/NyXKkg==}
dependencies:
'@babel/parser': 7.18.11
@@ -507,29 +533,29 @@ packages:
postcss: 8.4.16
source-map: 0.6.1
- /@vue/compiler-ssr/3.2.37:
+ /@vue/compiler-ssr@3.2.37:
resolution: {integrity: sha512-7mQJD7HdXxQjktmsWp/J67lThEIcxLemz1Vb5I6rYJHR5vI+lON3nPGOH3ubmbvYGt8xEUaAr1j7/tIFWiEOqw==}
dependencies:
'@vue/compiler-dom': 3.2.37
'@vue/shared': 3.2.37
- /@vue/devtools-api/6.2.1:
+ /@vue/devtools-api@6.2.1:
resolution: {integrity: sha512-OEgAMeQXvCoJ+1x8WyQuVZzFo0wcyCmUR3baRVLmKBo1LmYZWMlRiXlux5jd0fqVJu6PfDbOrZItVqUEzLobeQ==}
dev: false
- /@vue/eslint-config-prettier/7.0.0_2xd4q2tc5cqa5as7uugqhp6oue:
+ /@vue/eslint-config-prettier@7.0.0(eslint@8.22.0)(prettier@2.7.1):
resolution: {integrity: sha512-/CTc6ML3Wta1tCe1gUeO0EYnVXfo3nJXsIhZ8WJr3sov+cGASr6yuiibJTL6lmIBm7GobopToOuB3B6AWyV0Iw==}
peerDependencies:
eslint: '>= 7.28.0'
prettier: '>= 2.0.0'
dependencies:
eslint: 8.22.0
- eslint-config-prettier: 8.5.0_eslint@8.22.0
- eslint-plugin-prettier: 4.2.1_i2cojdczqdiurzgttlwdgf764e
+ eslint-config-prettier: 8.5.0(eslint@8.22.0)
+ eslint-plugin-prettier: 4.2.1(eslint-config-prettier@8.5.0)(eslint@8.22.0)(prettier@2.7.1)
prettier: 2.7.1
dev: true
- /@vue/eslint-config-typescript/11.0.0_4py5zxx5ck6utobkmozwvrmyiy:
+ /@vue/eslint-config-typescript@11.0.0(eslint-plugin-vue@9.3.0)(eslint@8.22.0)(typescript@4.7.4):
resolution: {integrity: sha512-txuRzxnQVmtUvvy9UyWUy9sHWXNeRPGmSPqP53hRtaiUeCTAondI9Ho9GQYI/8/eWljYOST7iA4Aa8sANBkWaA==}
engines: {node: ^14.17.0 || >=16.0.0}
peerDependencies:
@@ -540,17 +566,17 @@ packages:
typescript:
optional: true
dependencies:
- '@typescript-eslint/eslint-plugin': 5.33.1_vsoshirnpb7xw6mr7xomgfas2i
- '@typescript-eslint/parser': 5.33.1_4rv7y5c6xz3vfxwhbrcxxi73bq
+ '@typescript-eslint/eslint-plugin': 5.33.1(@typescript-eslint/parser@5.33.1)(eslint@8.22.0)(typescript@4.7.4)
+ '@typescript-eslint/parser': 5.33.1(eslint@8.22.0)(typescript@4.7.4)
eslint: 8.22.0
- eslint-plugin-vue: 9.3.0_eslint@8.22.0
+ eslint-plugin-vue: 9.3.0(eslint@8.22.0)
typescript: 4.7.4
- vue-eslint-parser: 9.0.3_eslint@8.22.0
+ vue-eslint-parser: 9.0.3(eslint@8.22.0)
transitivePeerDependencies:
- supports-color
dev: true
- /@vue/reactivity-transform/3.2.37:
+ /@vue/reactivity-transform@3.2.37:
resolution: {integrity: sha512-IWopkKEb+8qpu/1eMKVeXrK0NLw9HicGviJzhJDEyfxTR9e1WtpnnbYkJWurX6WwoFP0sz10xQg8yL8lgskAZg==}
dependencies:
'@babel/parser': 7.18.11
@@ -559,25 +585,25 @@ packages:
estree-walker: 2.0.2
magic-string: 0.25.9
- /@vue/reactivity/3.2.37:
+ /@vue/reactivity@3.2.37:
resolution: {integrity: sha512-/7WRafBOshOc6m3F7plwzPeCu/RCVv9uMpOwa/5PiY1Zz+WLVRWiy0MYKwmg19KBdGtFWsmZ4cD+LOdVPcs52A==}
dependencies:
'@vue/shared': 3.2.37
- /@vue/runtime-core/3.2.37:
+ /@vue/runtime-core@3.2.37:
resolution: {integrity: sha512-JPcd9kFyEdXLl/i0ClS7lwgcs0QpUAWj+SKX2ZC3ANKi1U4DOtiEr6cRqFXsPwY5u1L9fAjkinIdB8Rz3FoYNQ==}
dependencies:
'@vue/reactivity': 3.2.37
'@vue/shared': 3.2.37
- /@vue/runtime-dom/3.2.37:
+ /@vue/runtime-dom@3.2.37:
resolution: {integrity: sha512-HimKdh9BepShW6YozwRKAYjYQWg9mQn63RGEiSswMbW+ssIht1MILYlVGkAGGQbkhSh31PCdoUcfiu4apXJoPw==}
dependencies:
'@vue/runtime-core': 3.2.37
'@vue/shared': 3.2.37
csstype: 2.6.20
- /@vue/server-renderer/3.2.37_vue@3.2.37:
+ /@vue/server-renderer@3.2.37(vue@3.2.37):
resolution: {integrity: sha512-kLITEJvaYgZQ2h47hIzPh2K3jG8c1zCVbp/o/bzQOyvzaKiCquKS7AaioPI28GNxIsE/zSx+EwWYsNxDCX95MA==}
peerDependencies:
vue: 3.2.37
@@ -586,10 +612,10 @@ packages:
'@vue/shared': 3.2.37
vue: 3.2.37
- /@vue/shared/3.2.37:
+ /@vue/shared@3.2.37:
resolution: {integrity: sha512-4rSJemR2NQIo9Klm1vabqWjD8rs/ZaJSzMxkMNeJS6lHiUjjUeYFbooN19NgFjztubEKh3WlZUeOLVdbbUWHsw==}
- /@vue/test-utils/2.0.2_vue@3.2.37:
+ /@vue/test-utils@2.0.2(vue@3.2.37):
resolution: {integrity: sha512-E2P4oXSaWDqTZNbmKZFVLrNN/siVN78YkEqs7pHryWerrlZR9bBFLWdJwRoguX45Ru6HxIflzKl4vQvwRMwm5g==}
peerDependencies:
vue: ^3.0.1
@@ -597,7 +623,7 @@ packages:
vue: 3.2.37
dev: true
- /@vueuse/core/8.9.4_vue@3.2.37:
+ /@vueuse/core@8.9.4(vue@3.2.37):
resolution: {integrity: sha512-B/Mdj9TK1peFyWaPof+Zf/mP9XuGAngaJZBwPaXBvU3aCTZlx3ltlrFFFyMV4iGBwsjSCeUCgZrtkEj9dS2Y3Q==}
peerDependencies:
'@vue/composition-api': ^1.1.0
@@ -610,16 +636,16 @@ packages:
dependencies:
'@types/web-bluetooth': 0.0.14
'@vueuse/metadata': 8.9.4
- '@vueuse/shared': 8.9.4_vue@3.2.37
+ '@vueuse/shared': 8.9.4(vue@3.2.37)
vue: 3.2.37
- vue-demi: 0.13.8_vue@3.2.37
+ vue-demi: 0.13.8(vue@3.2.37)
dev: false
- /@vueuse/metadata/8.9.4:
+ /@vueuse/metadata@8.9.4:
resolution: {integrity: sha512-IwSfzH80bnJMzqhaapqJl9JRIiyQU0zsRGEgnxN6jhq7992cPUJIRfV+JHRIZXjYqbwt07E1gTEp0R0zPJ1aqw==}
dev: false
- /@vueuse/shared/8.9.4_vue@3.2.37:
+ /@vueuse/shared@8.9.4(vue@3.2.37):
resolution: {integrity: sha512-wt+T30c4K6dGRMVqPddexEVLa28YwxW5OFIPmzUHICjphfAuBFTTdDoyqREZNDOFJZ44ARH1WWQNCUK8koJ+Ag==}
peerDependencies:
'@vue/composition-api': ^1.1.0
@@ -631,21 +657,21 @@ packages:
optional: true
dependencies:
vue: 3.2.37
- vue-demi: 0.13.8_vue@3.2.37
+ vue-demi: 0.13.8(vue@3.2.37)
dev: false
- /abab/2.0.6:
+ /abab@2.0.6:
resolution: {integrity: sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==}
dev: true
- /acorn-globals/6.0.0:
+ /acorn-globals@6.0.0:
resolution: {integrity: sha512-ZQl7LOWaF5ePqqcX4hLuv/bLXYQNfNWw2c0/yX/TsPRKamzHcTGQnlCjHT3TsmkOUVEPS3crCxiPfdzE/Trlhg==}
dependencies:
acorn: 7.4.1
acorn-walk: 7.2.0
dev: true
- /acorn-jsx/5.3.2_acorn@8.8.0:
+ /acorn-jsx@5.3.2(acorn@8.8.0):
resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==}
peerDependencies:
acorn: ^6.0.0 || ^7.0.0 || ^8.0.0
@@ -653,24 +679,24 @@ packages:
acorn: 8.8.0
dev: true
- /acorn-walk/7.2.0:
+ /acorn-walk@7.2.0:
resolution: {integrity: sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==}
engines: {node: '>=0.4.0'}
dev: true
- /acorn/7.4.1:
+ /acorn@7.4.1:
resolution: {integrity: sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==}
engines: {node: '>=0.4.0'}
hasBin: true
dev: true
- /acorn/8.8.0:
+ /acorn@8.8.0:
resolution: {integrity: sha512-QOxyigPVrpZ2GXT+PFyZTl6TtOFc5egxHIP9IlQ+RbupQuX4RkT/Bee4/kQuC02Xkzg84JcT7oLYtDIQxp+v7w==}
engines: {node: '>=0.4.0'}
hasBin: true
dev: true
- /agent-base/6.0.2:
+ /agent-base@6.0.2:
resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==}
engines: {node: '>= 6.0.0'}
dependencies:
@@ -679,7 +705,7 @@ packages:
- supports-color
dev: true
- /ajv/6.12.6:
+ /ajv@6.12.6:
resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==}
dependencies:
fast-deep-equal: 3.1.3
@@ -688,19 +714,19 @@ packages:
uri-js: 4.4.1
dev: true
- /ansi-regex/5.0.1:
+ /ansi-regex@5.0.1:
resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
engines: {node: '>=8'}
dev: true
- /ansi-styles/4.3.0:
+ /ansi-styles@4.3.0:
resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==}
engines: {node: '>=8'}
dependencies:
color-convert: 2.0.1
dev: true
- /anymatch/3.1.2:
+ /anymatch@3.1.2:
resolution: {integrity: sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==}
engines: {node: '>= 8'}
dependencies:
@@ -708,27 +734,27 @@ packages:
picomatch: 2.3.1
dev: true
- /argparse/2.0.1:
+ /argparse@2.0.1:
resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
dev: true
- /array-union/2.1.0:
+ /array-union@2.1.0:
resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==}
engines: {node: '>=8'}
dev: true
- /assertion-error/1.1.0:
+ /assertion-error@1.1.0:
resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==}
dev: true
- /async-validator/4.2.5:
+ /async-validator@4.2.5:
resolution: {integrity: sha512-7HhHjtERjqlNbZtqNqy2rckN/SpOOlmDliet+lP7k+eKZEjPk3DgyeU9lIXLdeLz0uBbbVp+9Qdow9wJWgwwfg==}
dev: false
- /asynckit/0.4.0:
+ /asynckit@0.4.0:
resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}
- /axios/0.27.2:
+ /axios@0.27.2:
resolution: {integrity: sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==}
dependencies:
follow-redirects: 1.15.1
@@ -737,38 +763,38 @@ packages:
- debug
dev: false
- /balanced-match/1.0.2:
+ /balanced-match@1.0.2:
resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
dev: true
- /binary-extensions/2.2.0:
+ /binary-extensions@2.2.0:
resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==}
engines: {node: '>=8'}
dev: true
- /boolbase/1.0.0:
+ /boolbase@1.0.0:
resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==}
dev: true
- /brace-expansion/1.1.11:
+ /brace-expansion@1.1.11:
resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==}
dependencies:
balanced-match: 1.0.2
concat-map: 0.0.1
dev: true
- /braces/3.0.2:
+ /braces@3.0.2:
resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==}
engines: {node: '>=8'}
dependencies:
fill-range: 7.0.1
dev: true
- /browser-process-hrtime/1.0.0:
+ /browser-process-hrtime@1.0.0:
resolution: {integrity: sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow==}
dev: true
- /c8/7.12.0:
+ /c8@7.12.0:
resolution: {integrity: sha512-CtgQrHOkyxr5koX1wEUmN/5cfDa2ckbHRA4Gy5LAL0zaCFtVWJS5++n+w4/sr2GWGerBxgTjpKeDclk/Qk6W/A==}
engines: {node: '>=10.12.0'}
hasBin: true
@@ -787,12 +813,12 @@ packages:
yargs-parser: 20.2.9
dev: true
- /callsites/3.1.0:
+ /callsites@3.1.0:
resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==}
engines: {node: '>=6'}
dev: true
- /chai/4.3.6:
+ /chai@4.3.6:
resolution: {integrity: sha512-bbcp3YfHCUzMOvKqsztczerVgBKSsEijCySNlHHbX3VG1nskvqjz5Rfso1gGwD6w6oOV3eI60pKuMOV5MV7p3Q==}
engines: {node: '>=4'}
dependencies:
@@ -805,7 +831,7 @@ packages:
type-detect: 4.0.8
dev: true
- /chalk/4.1.2:
+ /chalk@4.1.2:
resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
engines: {node: '>=10'}
dependencies:
@@ -813,11 +839,11 @@ packages:
supports-color: 7.2.0
dev: true
- /check-error/1.0.2:
+ /check-error@1.0.2:
resolution: {integrity: sha512-BrgHpW9NURQgzoNyjfq0Wu6VFO6D7IZEmJNdtgNqpzGG8RuNFHt2jQxWlAs4HMe119chBnv+34syEZtc6IhLtA==}
dev: true
- /chokidar/3.5.3:
+ /chokidar@3.5.3:
resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==}
engines: {node: '>= 8.10.0'}
dependencies:
@@ -832,7 +858,7 @@ packages:
fsevents: 2.3.2
dev: true
- /cliui/7.0.4:
+ /cliui@7.0.4:
resolution: {integrity: sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==}
dependencies:
string-width: 4.2.3
@@ -840,34 +866,34 @@ packages:
wrap-ansi: 7.0.0
dev: true
- /color-convert/2.0.1:
+ /color-convert@2.0.1:
resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==}
engines: {node: '>=7.0.0'}
dependencies:
color-name: 1.1.4
dev: true
- /color-name/1.1.4:
+ /color-name@1.1.4:
resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==}
dev: true
- /combined-stream/1.0.8:
+ /combined-stream@1.0.8:
resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==}
engines: {node: '>= 0.8'}
dependencies:
delayed-stream: 1.0.0
- /concat-map/0.0.1:
+ /concat-map@0.0.1:
resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==}
dev: true
- /convert-source-map/1.8.0:
+ /convert-source-map@1.8.0:
resolution: {integrity: sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==}
dependencies:
safe-buffer: 5.1.2
dev: true
- /cross-spawn/7.0.3:
+ /cross-spawn@7.0.3:
resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==}
engines: {node: '>= 8'}
dependencies:
@@ -876,31 +902,31 @@ packages:
which: 2.0.2
dev: true
- /cssesc/3.0.0:
+ /cssesc@3.0.0:
resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==}
engines: {node: '>=4'}
hasBin: true
dev: true
- /cssom/0.3.8:
+ /cssom@0.3.8:
resolution: {integrity: sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==}
dev: true
- /cssom/0.5.0:
+ /cssom@0.5.0:
resolution: {integrity: sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw==}
dev: true
- /cssstyle/2.3.0:
+ /cssstyle@2.3.0:
resolution: {integrity: sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==}
engines: {node: '>=8'}
dependencies:
cssom: 0.3.8
dev: true
- /csstype/2.6.20:
+ /csstype@2.6.20:
resolution: {integrity: sha512-/WwNkdXfckNgw6S5R125rrW8ez139lBHWouiBvX8dfMFtcn6V81REDqnH7+CRpRipfYlyU1CmOnOxrmGcFOjeA==}
- /data-urls/3.0.2:
+ /data-urls@3.0.2:
resolution: {integrity: sha512-Jy/tj3ldjZJo63sVAvg6LHt2mHvl4V6AgRAmNDtLdm7faqtsx+aJG42rsyCo9JCoRVKwPFzKlIPx3DIibwSIaQ==}
engines: {node: '>=12'}
dependencies:
@@ -909,16 +935,16 @@ packages:
whatwg-url: 11.0.0
dev: true
- /date-fns/2.29.3:
+ /date-fns@2.29.3:
resolution: {integrity: sha512-dDCnyH2WnnKusqvZZ6+jA1O51Ibt8ZMRNkDZdyAyK4YfbDwa/cEmuztzG5pk6hqlp9aSBPYcjOlktquahGwGeA==}
engines: {node: '>=0.11'}
dev: false
- /dayjs/1.11.5:
+ /dayjs@1.11.5:
resolution: {integrity: sha512-CAdX5Q3YW3Gclyo5Vpqkgpj8fSdLQcRuzfX6mC6Phy0nfJ0eGYOeS7m4mt2plDWLAtA4TqTakvbboHvUxfe4iA==}
dev: false
- /debug/4.3.4:
+ /debug@4.3.4:
resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==}
engines: {node: '>=6.0'}
peerDependencies:
@@ -930,64 +956,64 @@ packages:
ms: 2.1.2
dev: true
- /decimal.js/10.4.0:
+ /decimal.js@10.4.0:
resolution: {integrity: sha512-Nv6ENEzyPQ6AItkGwLE2PGKinZZ9g59vSh2BeH6NqPu0OTKZ5ruJsVqh/orbAnqXc9pBbgXAIrc2EyaCj8NpGg==}
dev: true
- /deep-eql/3.0.1:
+ /deep-eql@3.0.1:
resolution: {integrity: sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==}
engines: {node: '>=0.12'}
dependencies:
type-detect: 4.0.8
dev: true
- /deep-is/0.1.4:
+ /deep-is@0.1.4:
resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==}
dev: true
- /delayed-stream/1.0.0:
+ /delayed-stream@1.0.0:
resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==}
engines: {node: '>=0.4.0'}
- /dir-glob/3.0.1:
+ /dir-glob@3.0.1:
resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==}
engines: {node: '>=8'}
dependencies:
path-type: 4.0.0
dev: true
- /doctrine/3.0.0:
+ /doctrine@3.0.0:
resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==}
engines: {node: '>=6.0.0'}
dependencies:
esutils: 2.0.3
dev: true
- /domexception/4.0.0:
+ /domexception@4.0.0:
resolution: {integrity: sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==}
engines: {node: '>=12'}
dependencies:
webidl-conversions: 7.0.0
dev: true
- /element-plus/2.2.13_vue@3.2.37:
+ /element-plus@2.2.13(vue@3.2.37):
resolution: {integrity: sha512-dKQ7BPZC8deUPhv+6s4GgOL0GyGj3KpUarywxm6s1nWnHjH6FqeZlUcxPqBvJd7W/d81POayx3B13GP+rfkG9g==}
peerDependencies:
vue: ^3.2.0
dependencies:
'@ctrl/tinycolor': 3.4.1
- '@element-plus/icons-vue': 2.0.9_vue@3.2.37
+ '@element-plus/icons-vue': 2.0.9(vue@3.2.37)
'@floating-ui/dom': 0.5.4
- '@popperjs/core': /@sxzz/popperjs-es/2.11.7
+ '@popperjs/core': /@sxzz/popperjs-es@2.11.7
'@types/lodash': 4.14.183
'@types/lodash-es': 4.17.6
- '@vueuse/core': 8.9.4_vue@3.2.37
+ '@vueuse/core': 8.9.4(vue@3.2.37)
async-validator: 4.2.5
dayjs: 1.11.5
escape-html: 1.0.3
lodash: 4.17.21
lodash-es: 4.17.21
- lodash-unified: 1.0.2_3ib2ivapxullxkx3xftsimdk7u
+ lodash-unified: 1.0.2(@types/lodash-es@4.17.6)(lodash-es@4.17.21)(lodash@4.17.21)
memoize-one: 6.0.0
normalize-wheel-es: 1.2.0
vue: 3.2.37
@@ -995,16 +1021,16 @@ packages:
- '@vue/composition-api'
dev: false
- /emoji-regex/8.0.0:
+ /emoji-regex@8.0.0:
resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==}
dev: true
- /entities/4.3.1:
+ /entities@4.3.1:
resolution: {integrity: sha512-o4q/dYJlmyjP2zfnaWDUC6A3BQFmVTX+tZPezK7k0GLSU9QYCauscf5Y+qcEPzKL+EixVouYDgLQK5H9GrLpkg==}
engines: {node: '>=0.12'}
dev: true
- /esbuild-android-64/0.14.54:
+ /esbuild-android-64@0.14.54:
resolution: {integrity: sha512-Tz2++Aqqz0rJ7kYBfz+iqyE3QMycD4vk7LBRyWaAVFgFtQ/O8EJOnVmTOiDWYZ/uYzB4kvP+bqejYdVKzE5lAQ==}
engines: {node: '>=12'}
cpu: [x64]
@@ -1013,7 +1039,7 @@ packages:
dev: true
optional: true
- /esbuild-android-arm64/0.14.54:
+ /esbuild-android-arm64@0.14.54:
resolution: {integrity: sha512-F9E+/QDi9sSkLaClO8SOV6etqPd+5DgJje1F9lOWoNncDdOBL2YF59IhsWATSt0TLZbYCf3pNlTHvVV5VfHdvg==}
engines: {node: '>=12'}
cpu: [arm64]
@@ -1022,7 +1048,7 @@ packages:
dev: true
optional: true
- /esbuild-darwin-64/0.14.54:
+ /esbuild-darwin-64@0.14.54:
resolution: {integrity: sha512-jtdKWV3nBviOd5v4hOpkVmpxsBy90CGzebpbO9beiqUYVMBtSc0AL9zGftFuBon7PNDcdvNCEuQqw2x0wP9yug==}
engines: {node: '>=12'}
cpu: [x64]
@@ -1031,7 +1057,7 @@ packages:
dev: true
optional: true
- /esbuild-darwin-arm64/0.14.54:
+ /esbuild-darwin-arm64@0.14.54:
resolution: {integrity: sha512-OPafJHD2oUPyvJMrsCvDGkRrVCar5aVyHfWGQzY1dWnzErjrDuSETxwA2HSsyg2jORLY8yBfzc1MIpUkXlctmw==}
engines: {node: '>=12'}
cpu: [arm64]
@@ -1040,7 +1066,7 @@ packages:
dev: true
optional: true
- /esbuild-freebsd-64/0.14.54:
+ /esbuild-freebsd-64@0.14.54:
resolution: {integrity: sha512-OKwd4gmwHqOTp4mOGZKe/XUlbDJ4Q9TjX0hMPIDBUWWu/kwhBAudJdBoxnjNf9ocIB6GN6CPowYpR/hRCbSYAg==}
engines: {node: '>=12'}
cpu: [x64]
@@ -1049,7 +1075,7 @@ packages:
dev: true
optional: true
- /esbuild-freebsd-arm64/0.14.54:
+ /esbuild-freebsd-arm64@0.14.54:
resolution: {integrity: sha512-sFwueGr7OvIFiQT6WeG0jRLjkjdqWWSrfbVwZp8iMP+8UHEHRBvlaxL6IuKNDwAozNUmbb8nIMXa7oAOARGs1Q==}
engines: {node: '>=12'}
cpu: [arm64]
@@ -1058,7 +1084,7 @@ packages:
dev: true
optional: true
- /esbuild-linux-32/0.14.54:
+ /esbuild-linux-32@0.14.54:
resolution: {integrity: sha512-1ZuY+JDI//WmklKlBgJnglpUL1owm2OX+8E1syCD6UAxcMM/XoWd76OHSjl/0MR0LisSAXDqgjT3uJqT67O3qw==}
engines: {node: '>=12'}
cpu: [ia32]
@@ -1067,7 +1093,7 @@ packages:
dev: true
optional: true
- /esbuild-linux-64/0.14.54:
+ /esbuild-linux-64@0.14.54:
resolution: {integrity: sha512-EgjAgH5HwTbtNsTqQOXWApBaPVdDn7XcK+/PtJwZLT1UmpLoznPd8c5CxqsH2dQK3j05YsB3L17T8vE7cp4cCg==}
engines: {node: '>=12'}
cpu: [x64]
@@ -1076,25 +1102,25 @@ packages:
dev: true
optional: true
- /esbuild-linux-arm/0.14.54:
- resolution: {integrity: sha512-qqz/SjemQhVMTnvcLGoLOdFpCYbz4v4fUo+TfsWG+1aOu70/80RV6bgNpR2JCrppV2moUQkww+6bWxXRL9YMGw==}
+ /esbuild-linux-arm64@0.14.54:
+ resolution: {integrity: sha512-WL71L+0Rwv+Gv/HTmxTEmpv0UgmxYa5ftZILVi2QmZBgX3q7+tDeOQNqGtdXSdsL8TQi1vIaVFHUPDe0O0kdig==}
engines: {node: '>=12'}
- cpu: [arm]
+ cpu: [arm64]
os: [linux]
requiresBuild: true
dev: true
optional: true
- /esbuild-linux-arm64/0.14.54:
- resolution: {integrity: sha512-WL71L+0Rwv+Gv/HTmxTEmpv0UgmxYa5ftZILVi2QmZBgX3q7+tDeOQNqGtdXSdsL8TQi1vIaVFHUPDe0O0kdig==}
+ /esbuild-linux-arm@0.14.54:
+ resolution: {integrity: sha512-qqz/SjemQhVMTnvcLGoLOdFpCYbz4v4fUo+TfsWG+1aOu70/80RV6bgNpR2JCrppV2moUQkww+6bWxXRL9YMGw==}
engines: {node: '>=12'}
- cpu: [arm64]
+ cpu: [arm]
os: [linux]
requiresBuild: true
dev: true
optional: true
- /esbuild-linux-mips64le/0.14.54:
+ /esbuild-linux-mips64le@0.14.54:
resolution: {integrity: sha512-qTHGQB8D1etd0u1+sB6p0ikLKRVuCWhYQhAHRPkO+OF3I/iSlTKNNS0Lh2Oc0g0UFGguaFZZiPJdJey3AGpAlw==}
engines: {node: '>=12'}
cpu: [mips64el]
@@ -1103,7 +1129,7 @@ packages:
dev: true
optional: true
- /esbuild-linux-ppc64le/0.14.54:
+ /esbuild-linux-ppc64le@0.14.54:
resolution: {integrity: sha512-j3OMlzHiqwZBDPRCDFKcx595XVfOfOnv68Ax3U4UKZ3MTYQB5Yz3X1mn5GnodEVYzhtZgxEBidLWeIs8FDSfrQ==}
engines: {node: '>=12'}
cpu: [ppc64]
@@ -1112,7 +1138,7 @@ packages:
dev: true
optional: true
- /esbuild-linux-riscv64/0.14.54:
+ /esbuild-linux-riscv64@0.14.54:
resolution: {integrity: sha512-y7Vt7Wl9dkOGZjxQZnDAqqn+XOqFD7IMWiewY5SPlNlzMX39ocPQlOaoxvT4FllA5viyV26/QzHtvTjVNOxHZg==}
engines: {node: '>=12'}
cpu: [riscv64]
@@ -1121,7 +1147,7 @@ packages:
dev: true
optional: true
- /esbuild-linux-s390x/0.14.54:
+ /esbuild-linux-s390x@0.14.54:
resolution: {integrity: sha512-zaHpW9dziAsi7lRcyV4r8dhfG1qBidQWUXweUjnw+lliChJqQr+6XD71K41oEIC3Mx1KStovEmlzm+MkGZHnHA==}
engines: {node: '>=12'}
cpu: [s390x]
@@ -1130,7 +1156,7 @@ packages:
dev: true
optional: true
- /esbuild-netbsd-64/0.14.54:
+ /esbuild-netbsd-64@0.14.54:
resolution: {integrity: sha512-PR01lmIMnfJTgeU9VJTDY9ZerDWVFIUzAtJuDHwwceppW7cQWjBBqP48NdeRtoP04/AtO9a7w3viI+PIDr6d+w==}
engines: {node: '>=12'}
cpu: [x64]
@@ -1139,7 +1165,7 @@ packages:
dev: true
optional: true
- /esbuild-openbsd-64/0.14.54:
+ /esbuild-openbsd-64@0.14.54:
resolution: {integrity: sha512-Qyk7ikT2o7Wu76UsvvDS5q0amJvmRzDyVlL0qf5VLsLchjCa1+IAvd8kTBgUxD7VBUUVgItLkk609ZHUc1oCaw==}
engines: {node: '>=12'}
cpu: [x64]
@@ -1148,7 +1174,7 @@ packages:
dev: true
optional: true
- /esbuild-sunos-64/0.14.54:
+ /esbuild-sunos-64@0.14.54:
resolution: {integrity: sha512-28GZ24KmMSeKi5ueWzMcco6EBHStL3B6ubM7M51RmPwXQGLe0teBGJocmWhgwccA1GeFXqxzILIxXpHbl9Q/Kw==}
engines: {node: '>=12'}
cpu: [x64]
@@ -1157,7 +1183,7 @@ packages:
dev: true
optional: true
- /esbuild-windows-32/0.14.54:
+ /esbuild-windows-32@0.14.54:
resolution: {integrity: sha512-T+rdZW19ql9MjS7pixmZYVObd9G7kcaZo+sETqNH4RCkuuYSuv9AGHUVnPoP9hhuE1WM1ZimHz1CIBHBboLU7w==}
engines: {node: '>=12'}
cpu: [ia32]
@@ -1166,7 +1192,7 @@ packages:
dev: true
optional: true
- /esbuild-windows-64/0.14.54:
+ /esbuild-windows-64@0.14.54:
resolution: {integrity: sha512-AoHTRBUuYwXtZhjXZbA1pGfTo8cJo3vZIcWGLiUcTNgHpJJMC1rVA44ZereBHMJtotyN71S8Qw0npiCIkW96cQ==}
engines: {node: '>=12'}
cpu: [x64]
@@ -1175,7 +1201,7 @@ packages:
dev: true
optional: true
- /esbuild-windows-arm64/0.14.54:
+ /esbuild-windows-arm64@0.14.54:
resolution: {integrity: sha512-M0kuUvXhot1zOISQGXwWn6YtS+Y/1RT9WrVIOywZnJHo3jCDyewAc79aKNQWFCQm+xNHVTq9h8dZKvygoXQQRg==}
engines: {node: '>=12'}
cpu: [arm64]
@@ -1184,7 +1210,7 @@ packages:
dev: true
optional: true
- /esbuild/0.14.54:
+ /esbuild@0.14.54:
resolution: {integrity: sha512-Cy9llcy8DvET5uznocPyqL3BFRrFXSVqbgpMJ9Wz8oVjZlh/zUSNbPRbov0VX7VxN2JH1Oa0uNxZ7eLRb62pJA==}
engines: {node: '>=12'}
hasBin: true
@@ -1213,21 +1239,21 @@ packages:
esbuild-windows-arm64: 0.14.54
dev: true
- /escalade/3.1.1:
+ /escalade@3.1.1:
resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==}
engines: {node: '>=6'}
dev: true
- /escape-html/1.0.3:
+ /escape-html@1.0.3:
resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==}
dev: false
- /escape-string-regexp/4.0.0:
+ /escape-string-regexp@4.0.0:
resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==}
engines: {node: '>=10'}
dev: true
- /escodegen/2.0.0:
+ /escodegen@2.0.0:
resolution: {integrity: sha512-mmHKys/C8BFUGI+MAWNcSYoORYLMdPzjrknd2Vc+bUsjN5bXcr8EhrNB+UTqfL1y3I9c4fw2ihgtMPQLBRiQxw==}
engines: {node: '>=6.0'}
hasBin: true
@@ -1240,7 +1266,7 @@ packages:
source-map: 0.6.1
dev: true
- /eslint-config-prettier/8.5.0_eslint@8.22.0:
+ /eslint-config-prettier@8.5.0(eslint@8.22.0):
resolution: {integrity: sha512-obmWKLUNCnhtQRKc+tmnYuQl0pFU1ibYJQ5BGhTVB08bHe9wC8qUeG7c08dj9XX+AuPj1YSGSQIHl1pnDHZR0Q==}
hasBin: true
peerDependencies:
@@ -1249,23 +1275,7 @@ packages:
eslint: 8.22.0
dev: true
- /eslint-plugin-prettier/4.2.1_2xd4q2tc5cqa5as7uugqhp6oue:
- resolution: {integrity: sha512-f/0rXLXUt0oFYs8ra4w49wYZBG5GKZpAYsJSm6rnYL5uVDjd+zowwMwVZHnAjf4edNrKpCDYfXDgmRE/Ak7QyQ==}
- engines: {node: '>=12.0.0'}
- peerDependencies:
- eslint: '>=7.28.0'
- eslint-config-prettier: '*'
- prettier: '>=2.0.0'
- peerDependenciesMeta:
- eslint-config-prettier:
- optional: true
- dependencies:
- eslint: 8.22.0
- prettier: 2.7.1
- prettier-linter-helpers: 1.0.0
- dev: true
-
- /eslint-plugin-prettier/4.2.1_i2cojdczqdiurzgttlwdgf764e:
+ /eslint-plugin-prettier@4.2.1(eslint-config-prettier@8.5.0)(eslint@8.22.0)(prettier@2.7.1):
resolution: {integrity: sha512-f/0rXLXUt0oFYs8ra4w49wYZBG5GKZpAYsJSm6rnYL5uVDjd+zowwMwVZHnAjf4edNrKpCDYfXDgmRE/Ak7QyQ==}
engines: {node: '>=12.0.0'}
peerDependencies:
@@ -1277,30 +1287,30 @@ packages:
optional: true
dependencies:
eslint: 8.22.0
- eslint-config-prettier: 8.5.0_eslint@8.22.0
+ eslint-config-prettier: 8.5.0(eslint@8.22.0)
prettier: 2.7.1
prettier-linter-helpers: 1.0.0
dev: true
- /eslint-plugin-vue/9.3.0_eslint@8.22.0:
+ /eslint-plugin-vue@9.3.0(eslint@8.22.0):
resolution: {integrity: sha512-iscKKkBZgm6fGZwFt6poRoWC0Wy2dQOlwUPW++CiPoQiw1enctV2Hj5DBzzjJZfyqs+FAXhgzL4q0Ww03AgSmQ==}
engines: {node: ^14.17.0 || >=16.0.0}
peerDependencies:
eslint: ^6.2.0 || ^7.0.0 || ^8.0.0
dependencies:
eslint: 8.22.0
- eslint-utils: 3.0.0_eslint@8.22.0
+ eslint-utils: 3.0.0(eslint@8.22.0)
natural-compare: 1.4.0
nth-check: 2.1.1
postcss-selector-parser: 6.0.10
semver: 7.3.7
- vue-eslint-parser: 9.0.3_eslint@8.22.0
+ vue-eslint-parser: 9.0.3(eslint@8.22.0)
xml-name-validator: 4.0.0
transitivePeerDependencies:
- supports-color
dev: true
- /eslint-scope/5.1.1:
+ /eslint-scope@5.1.1:
resolution: {integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==}
engines: {node: '>=8.0.0'}
dependencies:
@@ -1308,7 +1318,7 @@ packages:
estraverse: 4.3.0
dev: true
- /eslint-scope/7.1.1:
+ /eslint-scope@7.1.1:
resolution: {integrity: sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
dependencies:
@@ -1316,7 +1326,7 @@ packages:
estraverse: 5.3.0
dev: true
- /eslint-utils/3.0.0_eslint@8.22.0:
+ /eslint-utils@3.0.0(eslint@8.22.0):
resolution: {integrity: sha512-uuQC43IGctw68pJA1RgbQS8/NP7rch6Cwd4j3ZBtgo4/8Flj4eGE7ZYSZRN3iq5pVUv6GPdW5Z1RFleo84uLDA==}
engines: {node: ^10.0.0 || ^12.0.0 || >= 14.0.0}
peerDependencies:
@@ -1326,17 +1336,17 @@ packages:
eslint-visitor-keys: 2.1.0
dev: true
- /eslint-visitor-keys/2.1.0:
+ /eslint-visitor-keys@2.1.0:
resolution: {integrity: sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==}
engines: {node: '>=10'}
dev: true
- /eslint-visitor-keys/3.3.0:
+ /eslint-visitor-keys@3.3.0:
resolution: {integrity: sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
dev: true
- /eslint/8.22.0:
+ /eslint@8.22.0:
resolution: {integrity: sha512-ci4t0sz6vSRKdmkOGmprBo6fmI4PrphDFMy5JEq/fNS0gQkJM3rLmrqcp8ipMcdobH3KtUP40KniAE9W19S4wA==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
hasBin: true
@@ -1351,7 +1361,7 @@ packages:
doctrine: 3.0.0
escape-string-regexp: 4.0.0
eslint-scope: 7.1.1
- eslint-utils: 3.0.0_eslint@8.22.0
+ eslint-utils: 3.0.0(eslint@8.22.0)
eslint-visitor-keys: 3.3.0
espree: 9.3.3
esquery: 1.4.0
@@ -1384,62 +1394,62 @@ packages:
- supports-color
dev: true
- /espree/9.3.3:
+ /espree@9.3.3:
resolution: {integrity: sha512-ORs1Rt/uQTqUKjDdGCyrtYxbazf5umATSf/K4qxjmZHORR6HJk+2s/2Pqe+Kk49HHINC/xNIrGfgh8sZcll0ng==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
dependencies:
acorn: 8.8.0
- acorn-jsx: 5.3.2_acorn@8.8.0
+ acorn-jsx: 5.3.2(acorn@8.8.0)
eslint-visitor-keys: 3.3.0
dev: true
- /esprima/4.0.1:
+ /esprima@4.0.1:
resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==}
engines: {node: '>=4'}
hasBin: true
dev: true
- /esquery/1.4.0:
+ /esquery@1.4.0:
resolution: {integrity: sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==}
engines: {node: '>=0.10'}
dependencies:
estraverse: 5.3.0
dev: true
- /esrecurse/4.3.0:
+ /esrecurse@4.3.0:
resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==}
engines: {node: '>=4.0'}
dependencies:
estraverse: 5.3.0
dev: true
- /estraverse/4.3.0:
+ /estraverse@4.3.0:
resolution: {integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==}
engines: {node: '>=4.0'}
dev: true
- /estraverse/5.3.0:
+ /estraverse@5.3.0:
resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==}
engines: {node: '>=4.0'}
dev: true
- /estree-walker/2.0.2:
+ /estree-walker@2.0.2:
resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==}
- /esutils/2.0.3:
+ /esutils@2.0.3:
resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==}
engines: {node: '>=0.10.0'}
dev: true
- /fast-deep-equal/3.1.3:
+ /fast-deep-equal@3.1.3:
resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==}
dev: true
- /fast-diff/1.2.0:
+ /fast-diff@1.2.0:
resolution: {integrity: sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w==}
dev: true
- /fast-glob/3.2.11:
+ /fast-glob@3.2.11:
resolution: {integrity: sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew==}
engines: {node: '>=8.6.0'}
dependencies:
@@ -1450,35 +1460,35 @@ packages:
micromatch: 4.0.5
dev: true
- /fast-json-stable-stringify/2.1.0:
+ /fast-json-stable-stringify@2.1.0:
resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==}
dev: true
- /fast-levenshtein/2.0.6:
+ /fast-levenshtein@2.0.6:
resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==}
dev: true
- /fastq/1.13.0:
+ /fastq@1.13.0:
resolution: {integrity: sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==}
dependencies:
reusify: 1.0.4
dev: true
- /file-entry-cache/6.0.1:
+ /file-entry-cache@6.0.1:
resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==}
engines: {node: ^10.12.0 || >=12.0.0}
dependencies:
flat-cache: 3.0.4
dev: true
- /fill-range/7.0.1:
+ /fill-range@7.0.1:
resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==}
engines: {node: '>=8'}
dependencies:
to-regex-range: 5.0.1
dev: true
- /find-up/5.0.0:
+ /find-up@5.0.0:
resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==}
engines: {node: '>=10'}
dependencies:
@@ -1486,7 +1496,7 @@ packages:
path-exists: 4.0.0
dev: true
- /flat-cache/3.0.4:
+ /flat-cache@3.0.4:
resolution: {integrity: sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==}
engines: {node: ^10.12.0 || >=12.0.0}
dependencies:
@@ -1494,11 +1504,11 @@ packages:
rimraf: 3.0.2
dev: true
- /flatted/3.2.6:
+ /flatted@3.2.6:
resolution: {integrity: sha512-0sQoMh9s0BYsm+12Huy/rkKxVu4R1+r96YX5cG44rHV0pQ6iC3Q+mkoMFaGWObMFYQxCVT+ssG1ksneA2MI9KQ==}
dev: true
- /follow-redirects/1.15.1:
+ /follow-redirects@1.15.1:
resolution: {integrity: sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==}
engines: {node: '>=4.0'}
peerDependencies:
@@ -1508,7 +1518,7 @@ packages:
optional: true
dev: false
- /foreground-child/2.0.0:
+ /foreground-child@2.0.0:
resolution: {integrity: sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA==}
engines: {node: '>=8.0.0'}
dependencies:
@@ -1516,7 +1526,7 @@ packages:
signal-exit: 3.0.7
dev: true
- /form-data/4.0.0:
+ /form-data@4.0.0:
resolution: {integrity: sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==}
engines: {node: '>= 6'}
dependencies:
@@ -1524,11 +1534,11 @@ packages:
combined-stream: 1.0.8
mime-types: 2.1.35
- /fs.realpath/1.0.0:
+ /fs.realpath@1.0.0:
resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
dev: true
- /fsevents/2.3.2:
+ /fsevents@2.3.2:
resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
@@ -1536,38 +1546,38 @@ packages:
dev: true
optional: true
- /function-bind/1.1.1:
+ /function-bind@1.1.1:
resolution: {integrity: sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==}
dev: true
- /functional-red-black-tree/1.0.1:
+ /functional-red-black-tree@1.0.1:
resolution: {integrity: sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g==}
dev: true
- /get-caller-file/2.0.5:
+ /get-caller-file@2.0.5:
resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==}
engines: {node: 6.* || 8.* || >= 10.*}
dev: true
- /get-func-name/2.0.0:
+ /get-func-name@2.0.0:
resolution: {integrity: sha512-Hm0ixYtaSZ/V7C8FJrtZIuBBI+iSgL+1Aq82zSu8VQNB4S3Gk8e7Qs3VwBDJAhmRZcFqkl3tQu36g/Foh5I5ig==}
dev: true
- /glob-parent/5.1.2:
+ /glob-parent@5.1.2:
resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==}
engines: {node: '>= 6'}
dependencies:
is-glob: 4.0.3
dev: true
- /glob-parent/6.0.2:
+ /glob-parent@6.0.2:
resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==}
engines: {node: '>=10.13.0'}
dependencies:
is-glob: 4.0.3
dev: true
- /glob/7.2.3:
+ /glob@7.2.3:
resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
dependencies:
fs.realpath: 1.0.0
@@ -1578,14 +1588,14 @@ packages:
path-is-absolute: 1.0.1
dev: true
- /globals/13.17.0:
+ /globals@13.17.0:
resolution: {integrity: sha512-1C+6nQRb1GwGMKm2dH/E7enFAMxGTmGI7/dEdhy/DNelv85w9B72t3uc5frtMNXIbzrarJJ/lTCjcaZwbLJmyw==}
engines: {node: '>=8'}
dependencies:
type-fest: 0.20.2
dev: true
- /globby/11.1.0:
+ /globby@11.1.0:
resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==}
engines: {node: '>=10'}
dependencies:
@@ -1597,34 +1607,34 @@ packages:
slash: 3.0.0
dev: true
- /grapheme-splitter/1.0.4:
+ /grapheme-splitter@1.0.4:
resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==}
dev: true
- /has-flag/4.0.0:
+ /has-flag@4.0.0:
resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==}
engines: {node: '>=8'}
dev: true
- /has/1.0.3:
+ /has@1.0.3:
resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==}
engines: {node: '>= 0.4.0'}
dependencies:
function-bind: 1.1.1
dev: true
- /html-encoding-sniffer/3.0.0:
+ /html-encoding-sniffer@3.0.0:
resolution: {integrity: sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==}
engines: {node: '>=12'}
dependencies:
whatwg-encoding: 2.0.0
dev: true
- /html-escaper/2.0.2:
+ /html-escaper@2.0.2:
resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==}
dev: true
- /http-proxy-agent/5.0.0:
+ /http-proxy-agent@5.0.0:
resolution: {integrity: sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==}
engines: {node: '>= 6'}
dependencies:
@@ -1635,7 +1645,7 @@ packages:
- supports-color
dev: true
- /https-proxy-agent/5.0.1:
+ /https-proxy-agent@5.0.1:
resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==}
engines: {node: '>= 6'}
dependencies:
@@ -1645,23 +1655,23 @@ packages:
- supports-color
dev: true
- /iconv-lite/0.6.3:
+ /iconv-lite@0.6.3:
resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==}
engines: {node: '>=0.10.0'}
dependencies:
safer-buffer: 2.1.2
dev: true
- /ignore/5.2.0:
+ /ignore@5.2.0:
resolution: {integrity: sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==}
engines: {node: '>= 4'}
dev: true
- /immutable/4.1.0:
+ /immutable@4.1.0:
resolution: {integrity: sha512-oNkuqVTA8jqG1Q6c+UglTOD1xhC1BtjKI7XkCXRkZHrN5m18/XsnUp8Q89GkQO/z+0WjonSvl0FLhDYftp46nQ==}
dev: true
- /import-fresh/3.3.0:
+ /import-fresh@3.3.0:
resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==}
engines: {node: '>=6'}
dependencies:
@@ -1669,71 +1679,71 @@ packages:
resolve-from: 4.0.0
dev: true
- /imurmurhash/0.1.4:
+ /imurmurhash@0.1.4:
resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
engines: {node: '>=0.8.19'}
dev: true
- /inflight/1.0.6:
+ /inflight@1.0.6:
resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==}
dependencies:
once: 1.4.0
wrappy: 1.0.2
dev: true
- /inherits/2.0.4:
+ /inherits@2.0.4:
resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
dev: true
- /is-binary-path/2.1.0:
+ /is-binary-path@2.1.0:
resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==}
engines: {node: '>=8'}
dependencies:
binary-extensions: 2.2.0
dev: true
- /is-core-module/2.10.0:
+ /is-core-module@2.10.0:
resolution: {integrity: sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg==}
dependencies:
has: 1.0.3
dev: true
- /is-extglob/2.1.1:
+ /is-extglob@2.1.1:
resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==}
engines: {node: '>=0.10.0'}
dev: true
- /is-fullwidth-code-point/3.0.0:
+ /is-fullwidth-code-point@3.0.0:
resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==}
engines: {node: '>=8'}
dev: true
- /is-glob/4.0.3:
+ /is-glob@4.0.3:
resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
engines: {node: '>=0.10.0'}
dependencies:
is-extglob: 2.1.1
dev: true
- /is-number/7.0.0:
+ /is-number@7.0.0:
resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
engines: {node: '>=0.12.0'}
dev: true
- /is-potential-custom-element-name/1.0.1:
+ /is-potential-custom-element-name@1.0.1:
resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==}
dev: true
- /isexe/2.0.0:
+ /isexe@2.0.0:
resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==}
dev: true
- /istanbul-lib-coverage/3.2.0:
+ /istanbul-lib-coverage@3.2.0:
resolution: {integrity: sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==}
engines: {node: '>=8'}
dev: true
- /istanbul-lib-report/3.0.0:
+ /istanbul-lib-report@3.0.0:
resolution: {integrity: sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==}
engines: {node: '>=8'}
dependencies:
@@ -1742,7 +1752,7 @@ packages:
supports-color: 7.2.0
dev: true
- /istanbul-reports/3.1.5:
+ /istanbul-reports@3.1.5:
resolution: {integrity: sha512-nUsEMa9pBt/NOHqbcbeJEgqIlY/K7rVWUX6Lql2orY5e9roQOthbR3vtY4zzf2orPELg80fnxxk9zUyPlgwD1w==}
engines: {node: '>=8'}
dependencies:
@@ -1750,14 +1760,14 @@ packages:
istanbul-lib-report: 3.0.0
dev: true
- /js-yaml/4.1.0:
+ /js-yaml@4.1.0:
resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==}
hasBin: true
dependencies:
argparse: 2.0.1
dev: true
- /jsdom/20.0.0:
+ /jsdom@20.0.0:
resolution: {integrity: sha512-x4a6CKCgx00uCmP+QakBDFXwjAJ69IkkIWHmtmjd3wvXPcdOS44hfX2vqkOQrVrq8l9DhNNADZRXaCEWvgXtVA==}
engines: {node: '>=14'}
peerDependencies:
@@ -1799,15 +1809,15 @@ packages:
- utf-8-validate
dev: true
- /json-schema-traverse/0.4.1:
+ /json-schema-traverse@0.4.1:
resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==}
dev: true
- /json-stable-stringify-without-jsonify/1.0.1:
+ /json-stable-stringify-without-jsonify@1.0.1:
resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==}
dev: true
- /levn/0.3.0:
+ /levn@0.3.0:
resolution: {integrity: sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==}
engines: {node: '>= 0.8.0'}
dependencies:
@@ -1815,7 +1825,7 @@ packages:
type-check: 0.3.2
dev: true
- /levn/0.4.1:
+ /levn@0.4.1:
resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==}
engines: {node: '>= 0.8.0'}
dependencies:
@@ -1823,23 +1833,23 @@ packages:
type-check: 0.4.0
dev: true
- /local-pkg/0.4.2:
+ /local-pkg@0.4.2:
resolution: {integrity: sha512-mlERgSPrbxU3BP4qBqAvvwlgW4MTg78iwJdGGnv7kibKjWcJksrG3t6LB5lXI93wXRDvG4NpUgJFmTG4T6rdrg==}
engines: {node: '>=14'}
dev: true
- /locate-path/6.0.0:
+ /locate-path@6.0.0:
resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==}
engines: {node: '>=10'}
dependencies:
p-locate: 5.0.0
dev: true
- /lodash-es/4.17.21:
+ /lodash-es@4.17.21:
resolution: {integrity: sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==}
dev: false
- /lodash-unified/1.0.2_3ib2ivapxullxkx3xftsimdk7u:
+ /lodash-unified@1.0.2(@types/lodash-es@4.17.6)(lodash-es@4.17.21)(lodash@4.17.21):
resolution: {integrity: sha512-OGbEy+1P+UT26CYi4opY4gebD8cWRDxAT6MAObIVQMiqYdxZr1g3QHWCToVsm31x2NkLS4K3+MC2qInaRMa39g==}
peerDependencies:
'@types/lodash-es': '*'
@@ -1851,48 +1861,48 @@ packages:
lodash-es: 4.17.21
dev: false
- /lodash.merge/4.6.2:
+ /lodash.merge@4.6.2:
resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==}
dev: true
- /lodash/4.17.21:
+ /lodash@4.17.21:
resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
- /loupe/2.3.4:
+ /loupe@2.3.4:
resolution: {integrity: sha512-OvKfgCC2Ndby6aSTREl5aCCPTNIzlDfQZvZxNUrBrihDhL3xcrYegTblhmEiCrg2kKQz4XsFIaemE5BF4ybSaQ==}
dependencies:
get-func-name: 2.0.0
dev: true
- /lru-cache/6.0.0:
+ /lru-cache@6.0.0:
resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==}
engines: {node: '>=10'}
dependencies:
yallist: 4.0.0
dev: true
- /magic-string/0.25.9:
+ /magic-string@0.25.9:
resolution: {integrity: sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==}
dependencies:
sourcemap-codec: 1.4.8
- /make-dir/3.1.0:
+ /make-dir@3.1.0:
resolution: {integrity: sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==}
engines: {node: '>=8'}
dependencies:
semver: 6.3.0
dev: true
- /memoize-one/6.0.0:
+ /memoize-one@6.0.0:
resolution: {integrity: sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==}
dev: false
- /merge2/1.4.1:
+ /merge2@1.4.1:
resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==}
engines: {node: '>= 8'}
dev: true
- /micromatch/4.0.5:
+ /micromatch@4.0.5:
resolution: {integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==}
engines: {node: '>=8.6'}
dependencies:
@@ -1900,61 +1910,61 @@ packages:
picomatch: 2.3.1
dev: true
- /mime-db/1.52.0:
+ /mime-db@1.52.0:
resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==}
engines: {node: '>= 0.6'}
- /mime-types/2.1.35:
+ /mime-types@2.1.35:
resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==}
engines: {node: '>= 0.6'}
dependencies:
mime-db: 1.52.0
- /minimatch/3.1.2:
+ /minimatch@3.1.2:
resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==}
dependencies:
brace-expansion: 1.1.11
dev: true
- /ms/2.1.2:
+ /ms@2.1.2:
resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==}
dev: true
- /nanoid/3.3.4:
+ /nanoid@3.3.4:
resolution: {integrity: sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==}
engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
hasBin: true
- /natural-compare/1.4.0:
+ /natural-compare@1.4.0:
resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==}
dev: true
- /normalize-path/3.0.0:
+ /normalize-path@3.0.0:
resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==}
engines: {node: '>=0.10.0'}
dev: true
- /normalize-wheel-es/1.2.0:
+ /normalize-wheel-es@1.2.0:
resolution: {integrity: sha512-Wj7+EJQ8mSuXr2iWfnujrimU35R2W4FAErEyTmJoJ7ucwTn2hOUSsRehMb5RSYkxXGTM7Y9QpvPmp++w5ftoJw==}
dev: false
- /nth-check/2.1.1:
+ /nth-check@2.1.1:
resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==}
dependencies:
boolbase: 1.0.0
dev: true
- /nwsapi/2.2.1:
+ /nwsapi@2.2.1:
resolution: {integrity: sha512-JYOWTeFoS0Z93587vRJgASD5Ut11fYl5NyihP3KrYBvMe1FRRs6RN7m20SA/16GM4P6hTnZjT+UmDOt38UeXNg==}
dev: true
- /once/1.4.0:
+ /once@1.4.0:
resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
dependencies:
wrappy: 1.0.2
dev: true
- /optionator/0.8.3:
+ /optionator@0.8.3:
resolution: {integrity: sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==}
engines: {node: '>= 0.8.0'}
dependencies:
@@ -1966,7 +1976,7 @@ packages:
word-wrap: 1.2.3
dev: true
- /optionator/0.9.1:
+ /optionator@0.9.1:
resolution: {integrity: sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==}
engines: {node: '>= 0.8.0'}
dependencies:
@@ -1978,70 +1988,70 @@ packages:
word-wrap: 1.2.3
dev: true
- /p-limit/3.1.0:
+ /p-limit@3.1.0:
resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==}
engines: {node: '>=10'}
dependencies:
yocto-queue: 0.1.0
dev: true
- /p-locate/5.0.0:
+ /p-locate@5.0.0:
resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==}
engines: {node: '>=10'}
dependencies:
p-limit: 3.1.0
dev: true
- /parent-module/1.0.1:
+ /parent-module@1.0.1:
resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
engines: {node: '>=6'}
dependencies:
callsites: 3.1.0
dev: true
- /parse5/7.0.0:
+ /parse5@7.0.0:
resolution: {integrity: sha512-y/t8IXSPWTuRZqXc0ajH/UwDj4mnqLEbSttNbThcFhGrZuOyoyvNBO85PBp2jQa55wY9d07PBNjsK8ZP3K5U6g==}
dependencies:
entities: 4.3.1
dev: true
- /path-exists/4.0.0:
+ /path-exists@4.0.0:
resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
engines: {node: '>=8'}
dev: true
- /path-is-absolute/1.0.1:
+ /path-is-absolute@1.0.1:
resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==}
engines: {node: '>=0.10.0'}
dev: true
- /path-key/3.1.1:
+ /path-key@3.1.1:
resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==}
engines: {node: '>=8'}
dev: true
- /path-parse/1.0.7:
+ /path-parse@1.0.7:
resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==}
dev: true
- /path-type/4.0.0:
+ /path-type@4.0.0:
resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==}
engines: {node: '>=8'}
dev: true
- /pathval/1.1.1:
+ /pathval@1.1.1:
resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==}
dev: true
- /picocolors/1.0.0:
+ /picocolors@1.0.0:
resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==}
- /picomatch/2.3.1:
+ /picomatch@2.3.1:
resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==}
engines: {node: '>=8.6'}
dev: true
- /pinia-plugin-persistedstate/2.1.1_pinia@2.0.18:
+ /pinia-plugin-persistedstate@2.1.1(pinia@2.0.18):
resolution: {integrity: sha512-HUgsU5IRtM75eAQiIqzT3p1oPEuYH1/B2ipTMU++yE+FV0LkHaBswdKXs0RMWYCmugO8s62oxLTh/N1dLNp+5A==}
peerDependencies:
pinia: ^2.0.0
@@ -2049,10 +2059,10 @@ packages:
pinia:
optional: true
dependencies:
- pinia: 2.0.18_j6bzmzd4ujpabbp5objtwxyjp4
+ pinia: 2.0.18(typescript@4.7.4)(vue@3.2.37)
dev: false
- /pinia/2.0.18_j6bzmzd4ujpabbp5objtwxyjp4:
+ /pinia@2.0.18(typescript@4.7.4)(vue@3.2.37):
resolution: {integrity: sha512-I5MW05UVX6a5Djka136oH3VzYFiZUgeOApBwFjMx6pL91eHtGVlE3adjNUKLgtwGnrxiBRuJ8+4R3LKJKwnyZg==}
peerDependencies:
'@vue/composition-api': ^1.4.0
@@ -2067,10 +2077,10 @@ packages:
'@vue/devtools-api': 6.2.1
typescript: 4.7.4
vue: 3.2.37
- vue-demi: 0.13.8_vue@3.2.37
+ vue-demi: 0.13.8(vue@3.2.37)
dev: false
- /postcss-selector-parser/6.0.10:
+ /postcss-selector-parser@6.0.10:
resolution: {integrity: sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==}
engines: {node: '>=4'}
dependencies:
@@ -2078,7 +2088,7 @@ packages:
util-deprecate: 1.0.2
dev: true
- /postcss/8.4.16:
+ /postcss@8.4.16:
resolution: {integrity: sha512-ipHE1XBvKzm5xI7hiHCZJCSugxvsdq2mPnsq5+UF+VHCjiBvtDrlxJfMBToWaP9D5XlgNmcFGqoHmUn0EYEaRQ==}
engines: {node: ^10 || ^12 || >=14}
dependencies:
@@ -2086,65 +2096,65 @@ packages:
picocolors: 1.0.0
source-map-js: 1.0.2
- /prelude-ls/1.1.2:
+ /prelude-ls@1.1.2:
resolution: {integrity: sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==}
engines: {node: '>= 0.8.0'}
dev: true
- /prelude-ls/1.2.1:
+ /prelude-ls@1.2.1:
resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==}
engines: {node: '>= 0.8.0'}
dev: true
- /prettier-linter-helpers/1.0.0:
+ /prettier-linter-helpers@1.0.0:
resolution: {integrity: sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==}
engines: {node: '>=6.0.0'}
dependencies:
fast-diff: 1.2.0
dev: true
- /prettier/2.7.1:
+ /prettier@2.7.1:
resolution: {integrity: sha512-ujppO+MkdPqoVINuDFDRLClm7D78qbDt0/NR+wp5FqEZOoTNAjPHWj17QRhu7geIHJfcNhRk1XVQmF8Bp3ye+g==}
engines: {node: '>=10.13.0'}
hasBin: true
dev: true
- /psl/1.9.0:
+ /psl@1.9.0:
resolution: {integrity: sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==}
dev: true
- /punycode/2.1.1:
+ /punycode@2.1.1:
resolution: {integrity: sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==}
engines: {node: '>=6'}
dev: true
- /queue-microtask/1.2.3:
+ /queue-microtask@1.2.3:
resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
dev: true
- /readdirp/3.6.0:
+ /readdirp@3.6.0:
resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==}
engines: {node: '>=8.10.0'}
dependencies:
picomatch: 2.3.1
dev: true
- /regexpp/3.2.0:
+ /regexpp@3.2.0:
resolution: {integrity: sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==}
engines: {node: '>=8'}
dev: true
- /require-directory/2.1.1:
+ /require-directory@2.1.1:
resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==}
engines: {node: '>=0.10.0'}
dev: true
- /resolve-from/4.0.0:
+ /resolve-from@4.0.0:
resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==}
engines: {node: '>=4'}
dev: true
- /resolve/1.22.1:
+ /resolve@1.22.1:
resolution: {integrity: sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==}
hasBin: true
dependencies:
@@ -2153,19 +2163,19 @@ packages:
supports-preserve-symlinks-flag: 1.0.0
dev: true
- /reusify/1.0.4:
+ /reusify@1.0.4:
resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==}
engines: {iojs: '>=1.0.0', node: '>=0.10.0'}
dev: true
- /rimraf/3.0.2:
+ /rimraf@3.0.2:
resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==}
hasBin: true
dependencies:
glob: 7.2.3
dev: true
- /rollup/2.77.3:
+ /rollup@2.77.3:
resolution: {integrity: sha512-/qxNTG7FbmefJWoeeYJFbHehJ2HNWnjkAFRKzWN/45eNBBF/r8lo992CwcJXEzyVxs5FmfId+vTSTQDb+bxA+g==}
engines: {node: '>=10.0.0'}
hasBin: true
@@ -2173,21 +2183,21 @@ packages:
fsevents: 2.3.2
dev: true
- /run-parallel/1.2.0:
+ /run-parallel@1.2.0:
resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==}
dependencies:
queue-microtask: 1.2.3
dev: true
- /safe-buffer/5.1.2:
+ /safe-buffer@5.1.2:
resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==}
dev: true
- /safer-buffer/2.1.2:
+ /safer-buffer@2.1.2:
resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==}
dev: true
- /sass/1.54.4:
+ /sass@1.54.4:
resolution: {integrity: sha512-3tmF16yvnBwtlPrNBHw/H907j8MlOX8aTBnlNX1yrKx24RKcJGPyLhFUwkoKBKesR3unP93/2z14Ll8NicwQUA==}
engines: {node: '>=12.0.0'}
hasBin: true
@@ -2197,19 +2207,19 @@ packages:
source-map-js: 1.0.2
dev: true
- /saxes/6.0.0:
+ /saxes@6.0.0:
resolution: {integrity: sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==}
engines: {node: '>=v12.22.7'}
dependencies:
xmlchars: 2.2.0
dev: true
- /semver/6.3.0:
+ /semver@6.3.0:
resolution: {integrity: sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==}
hasBin: true
dev: true
- /semver/7.3.7:
+ /semver@7.3.7:
resolution: {integrity: sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==}
engines: {node: '>=10'}
hasBin: true
@@ -2217,39 +2227,39 @@ packages:
lru-cache: 6.0.0
dev: true
- /shebang-command/2.0.0:
+ /shebang-command@2.0.0:
resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==}
engines: {node: '>=8'}
dependencies:
shebang-regex: 3.0.0
dev: true
- /shebang-regex/3.0.0:
+ /shebang-regex@3.0.0:
resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==}
engines: {node: '>=8'}
dev: true
- /signal-exit/3.0.7:
+ /signal-exit@3.0.7:
resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==}
dev: true
- /slash/3.0.0:
+ /slash@3.0.0:
resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==}
engines: {node: '>=8'}
dev: true
- /source-map-js/1.0.2:
+ /source-map-js@1.0.2:
resolution: {integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==}
engines: {node: '>=0.10.0'}
- /source-map/0.6.1:
+ /source-map@0.6.1:
resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==}
engines: {node: '>=0.10.0'}
- /sourcemap-codec/1.4.8:
+ /sourcemap-codec@1.4.8:
resolution: {integrity: sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==}
- /string-width/4.2.3:
+ /string-width@4.2.3:
resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==}
engines: {node: '>=8'}
dependencies:
@@ -2258,35 +2268,35 @@ packages:
strip-ansi: 6.0.1
dev: true
- /strip-ansi/6.0.1:
+ /strip-ansi@6.0.1:
resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==}
engines: {node: '>=8'}
dependencies:
ansi-regex: 5.0.1
dev: true
- /strip-json-comments/3.1.1:
+ /strip-json-comments@3.1.1:
resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==}
engines: {node: '>=8'}
dev: true
- /supports-color/7.2.0:
+ /supports-color@7.2.0:
resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==}
engines: {node: '>=8'}
dependencies:
has-flag: 4.0.0
dev: true
- /supports-preserve-symlinks-flag/1.0.0:
+ /supports-preserve-symlinks-flag@1.0.0:
resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==}
engines: {node: '>= 0.4'}
dev: true
- /symbol-tree/3.2.4:
+ /symbol-tree@3.2.4:
resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==}
dev: true
- /test-exclude/6.0.0:
+ /test-exclude@6.0.0:
resolution: {integrity: sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==}
engines: {node: '>=8'}
dependencies:
@@ -2295,32 +2305,32 @@ packages:
minimatch: 3.1.2
dev: true
- /text-table/0.2.0:
+ /text-table@0.2.0:
resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==}
dev: true
- /tinypool/0.2.4:
+ /tinypool@0.2.4:
resolution: {integrity: sha512-Vs3rhkUH6Qq1t5bqtb816oT+HeJTXfwt2cbPH17sWHIYKTotQIFPk3tf2fgqRrVyMDVOc1EnPgzIxfIulXVzwQ==}
engines: {node: '>=14.0.0'}
dev: true
- /tinyspy/1.0.0:
+ /tinyspy@1.0.0:
resolution: {integrity: sha512-FI5B2QdODQYDRjfuLF+OrJ8bjWRMCXokQPcwKm0W3IzcbUmBNv536cQc7eXGoAuXphZwgx1DFbqImwzz08Fnhw==}
engines: {node: '>=14.0.0'}
dev: true
- /to-fast-properties/2.0.0:
+ /to-fast-properties@2.0.0:
resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==}
engines: {node: '>=4'}
- /to-regex-range/5.0.1:
+ /to-regex-range@5.0.1:
resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
engines: {node: '>=8.0'}
dependencies:
is-number: 7.0.0
dev: true
- /tough-cookie/4.0.0:
+ /tough-cookie@4.0.0:
resolution: {integrity: sha512-tHdtEpQCMrc1YLrMaqXXcj6AxhYi/xgit6mZu1+EDWUn+qhUf8wMQoFIy9NXuq23zAwtcB0t/MjACGR18pcRbg==}
engines: {node: '>=6'}
dependencies:
@@ -2329,18 +2339,18 @@ packages:
universalify: 0.1.2
dev: true
- /tr46/3.0.0:
+ /tr46@3.0.0:
resolution: {integrity: sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA==}
engines: {node: '>=12'}
dependencies:
punycode: 2.1.1
dev: true
- /tslib/1.14.1:
+ /tslib@1.14.1:
resolution: {integrity: sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==}
dev: true
- /tsutils/3.21.0_typescript@4.7.4:
+ /tsutils@3.21.0(typescript@4.7.4):
resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==}
engines: {node: '>= 6'}
peerDependencies:
@@ -2350,55 +2360,55 @@ packages:
typescript: 4.7.4
dev: true
- /type-check/0.3.2:
+ /type-check@0.3.2:
resolution: {integrity: sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==}
engines: {node: '>= 0.8.0'}
dependencies:
prelude-ls: 1.1.2
dev: true
- /type-check/0.4.0:
+ /type-check@0.4.0:
resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==}
engines: {node: '>= 0.8.0'}
dependencies:
prelude-ls: 1.2.1
dev: true
- /type-detect/4.0.8:
+ /type-detect@4.0.8:
resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==}
engines: {node: '>=4'}
dev: true
- /type-fest/0.20.2:
+ /type-fest@0.20.2:
resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==}
engines: {node: '>=10'}
dev: true
- /typescript/4.7.4:
+ /typescript@4.7.4:
resolution: {integrity: sha512-C0WQT0gezHuw6AdY1M2jxUO83Rjf0HP7Sk1DtXj6j1EwkQNZrHAg2XPWlq62oqEhYvONq5pkC2Y9oPljWToLmQ==}
engines: {node: '>=4.2.0'}
hasBin: true
- /universalify/0.1.2:
+ /universalify@0.1.2:
resolution: {integrity: sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==}
engines: {node: '>= 4.0.0'}
dev: true
- /uri-js/4.4.1:
+ /uri-js@4.4.1:
resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==}
dependencies:
punycode: 2.1.1
dev: true
- /util-deprecate/1.0.2:
+ /util-deprecate@1.0.2:
resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
dev: true
- /v8-compile-cache/2.3.0:
+ /v8-compile-cache@2.3.0:
resolution: {integrity: sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==}
dev: true
- /v8-to-istanbul/9.0.1:
+ /v8-to-istanbul@9.0.1:
resolution: {integrity: sha512-74Y4LqY74kLE6IFyIjPtkSTWzUZmj8tdHT9Ii/26dvQ6K9Dl2NbEfj0XgU2sHCtKgt5VupqhlO/5aWuqS+IY1w==}
engines: {node: '>=10.12.0'}
dependencies:
@@ -2407,7 +2417,7 @@ packages:
convert-source-map: 1.8.0
dev: true
- /vite/3.0.8_sass@1.54.4:
+ /vite@3.0.8(sass@1.54.4):
resolution: {integrity: sha512-AOZ4eN7mrkJiOLuw8IA7piS4IdOQyQCA81GxGsAQvAZzMRi9ZwGB3TOaYsj4uLAWK46T5L4AfQ6InNGlxX30IQ==}
engines: {node: ^14.18.0 || >=16.0.0}
hasBin: true
@@ -2435,7 +2445,7 @@ packages:
fsevents: 2.3.2
dev: true
- /vitest/0.22.0_jsdom@20.0.0+sass@1.54.4:
+ /vitest@0.22.0(jsdom@20.0.0)(sass@1.54.4):
resolution: {integrity: sha512-BSIro/QOHLaQY08FHwT6THWhqLQ+VPU+N4Rdo4pcP+16XB6oLmNNAXGcSh/MOLUhfUy+mqCwx7AyKmU7Ms5R+g==}
engines: {node: '>=v14.16.0'}
hasBin: true
@@ -2466,7 +2476,7 @@ packages:
local-pkg: 0.4.2
tinypool: 0.2.4
tinyspy: 1.0.0
- vite: 3.0.8_sass@1.54.4
+ vite: 3.0.8(sass@1.54.4)
transitivePeerDependencies:
- less
- sass
@@ -2475,7 +2485,7 @@ packages:
- terser
dev: true
- /vue-demi/0.13.8_vue@3.2.37:
+ /vue-demi@0.13.8(vue@3.2.37):
resolution: {integrity: sha512-Vy1zbZhCOdsmvGR6tJhAvO5vhP7eiS8xkbYQSoVa7o6KlIy3W8Rc53ED4qI4qpeRDjv3mLfXSEpYU6Yq4pgXRg==}
engines: {node: '>=12'}
hasBin: true
@@ -2490,7 +2500,7 @@ packages:
vue: 3.2.37
dev: false
- /vue-eslint-parser/9.0.3_eslint@8.22.0:
+ /vue-eslint-parser@9.0.3(eslint@8.22.0):
resolution: {integrity: sha512-yL+ZDb+9T0ELG4VIFo/2anAOz8SvBdlqEnQnvJ3M7Scq56DvtjY0VY88bByRZB0D4J0u8olBcfrXTVONXsh4og==}
engines: {node: ^14.17.0 || >=16.0.0}
peerDependencies:
@@ -2508,7 +2518,7 @@ packages:
- supports-color
dev: true
- /vue-i18n/9.2.2_vue@3.2.37:
+ /vue-i18n@9.2.2(vue@3.2.37):
resolution: {integrity: sha512-yswpwtj89rTBhegUAv9Mu37LNznyu3NpyLQmozF3i1hYOhwpG8RjcjIFIIfnu+2MDZJGSZPXaKWvnQA71Yv9TQ==}
engines: {node: '>= 14'}
peerDependencies:
@@ -2521,7 +2531,7 @@ packages:
vue: 3.2.37
dev: false
- /vue-router/4.1.3_vue@3.2.37:
+ /vue-router@4.1.3(vue@3.2.37):
resolution: {integrity: sha512-XvK81bcYglKiayT7/vYAg/f36ExPC4t90R/HIpzrZ5x+17BOWptXLCrEPufGgZeuq68ww4ekSIMBZY1qdUdfjA==}
peerDependencies:
vue: ^3.2.0
@@ -2530,7 +2540,7 @@ packages:
vue: 3.2.37
dev: false
- /vue-tsc/0.38.9_typescript@4.7.4:
+ /vue-tsc@0.38.9(typescript@4.7.4):
resolution: {integrity: sha512-Yoy5phgvGqyF98Fb4mYqboR4Q149jrdcGv5kSmufXJUq++RZJ2iMVG0g6zl+v3t4ORVWkQmRpsV4x2szufZ0LQ==}
hasBin: true
peerDependencies:
@@ -2540,46 +2550,46 @@ packages:
typescript: 4.7.4
dev: true
- /vue/3.2.37:
+ /vue@3.2.37:
resolution: {integrity: sha512-bOKEZxrm8Eh+fveCqS1/NkG/n6aMidsI6hahas7pa0w/l7jkbssJVsRhVDs07IdDq7h9KHswZOgItnwJAgtVtQ==}
dependencies:
'@vue/compiler-dom': 3.2.37
'@vue/compiler-sfc': 3.2.37
'@vue/runtime-dom': 3.2.37
- '@vue/server-renderer': 3.2.37_vue@3.2.37
+ '@vue/server-renderer': 3.2.37(vue@3.2.37)
'@vue/shared': 3.2.37
- /w3c-hr-time/1.0.2:
+ /w3c-hr-time@1.0.2:
resolution: {integrity: sha512-z8P5DvDNjKDoFIHK7q8r8lackT6l+jo/Ye3HOle7l9nICP9lf1Ci25fy9vHd0JOWewkIFzXIEig3TdKT7JQ5fQ==}
dependencies:
browser-process-hrtime: 1.0.0
dev: true
- /w3c-xmlserializer/3.0.0:
+ /w3c-xmlserializer@3.0.0:
resolution: {integrity: sha512-3WFqGEgSXIyGhOmAFtlicJNMjEps8b1MG31NCA0/vOF9+nKMUW1ckhi9cnNHmf88Rzw5V+dwIwsm2C7X8k9aQg==}
engines: {node: '>=12'}
dependencies:
xml-name-validator: 4.0.0
dev: true
- /webidl-conversions/7.0.0:
+ /webidl-conversions@7.0.0:
resolution: {integrity: sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==}
engines: {node: '>=12'}
dev: true
- /whatwg-encoding/2.0.0:
+ /whatwg-encoding@2.0.0:
resolution: {integrity: sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==}
engines: {node: '>=12'}
dependencies:
iconv-lite: 0.6.3
dev: true
- /whatwg-mimetype/3.0.0:
+ /whatwg-mimetype@3.0.0:
resolution: {integrity: sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==}
engines: {node: '>=12'}
dev: true
- /whatwg-url/11.0.0:
+ /whatwg-url@11.0.0:
resolution: {integrity: sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ==}
engines: {node: '>=12'}
dependencies:
@@ -2587,7 +2597,7 @@ packages:
webidl-conversions: 7.0.0
dev: true
- /which/2.0.2:
+ /which@2.0.2:
resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==}
engines: {node: '>= 8'}
hasBin: true
@@ -2595,12 +2605,12 @@ packages:
isexe: 2.0.0
dev: true
- /word-wrap/1.2.3:
+ /word-wrap@1.2.3:
resolution: {integrity: sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==}
engines: {node: '>=0.10.0'}
dev: true
- /wrap-ansi/7.0.0:
+ /wrap-ansi@7.0.0:
resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==}
engines: {node: '>=10'}
dependencies:
@@ -2609,11 +2619,11 @@ packages:
strip-ansi: 6.0.1
dev: true
- /wrappy/1.0.2:
+ /wrappy@1.0.2:
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
dev: true
- /ws/8.8.1:
+ /ws@8.8.1:
resolution: {integrity: sha512-bGy2JzvzkPowEJV++hF07hAD6niYSr0JzBNo/J29WsB57A2r7Wlc1UFcTR9IzrPvuNVO4B8LGqF8qcpsVOhJCA==}
engines: {node: '>=10.0.0'}
peerDependencies:
@@ -2626,30 +2636,30 @@ packages:
optional: true
dev: true
- /xml-name-validator/4.0.0:
+ /xml-name-validator@4.0.0:
resolution: {integrity: sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==}
engines: {node: '>=12'}
dev: true
- /xmlchars/2.2.0:
+ /xmlchars@2.2.0:
resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==}
dev: true
- /y18n/5.0.8:
+ /y18n@5.0.8:
resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==}
engines: {node: '>=10'}
dev: true
- /yallist/4.0.0:
+ /yallist@4.0.0:
resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==}
dev: true
- /yargs-parser/20.2.9:
+ /yargs-parser@20.2.9:
resolution: {integrity: sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==}
engines: {node: '>=10'}
dev: true
- /yargs/16.2.0:
+ /yargs@16.2.0:
resolution: {integrity: sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==}
engines: {node: '>=10'}
dependencies:
@@ -2662,7 +2672,7 @@ packages:
yargs-parser: 20.2.9
dev: true
- /yocto-queue/0.1.0:
+ /yocto-queue@0.1.0:
resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
engines: {node: '>=10'}
dev: true
diff --git a/pom.xml b/pom.xml
index 9ff452a4828..4ce11caf5ed 100644
--- a/pom.xml
+++ b/pom.xml
@@ -207,7 +207,7 @@
true
v16.19.1
- v7.27.1
+ v8.4.0
2.12
From 0875ce70664880d4e065897c2807eb0798a62557 Mon Sep 17 00:00:00 2001
From: liangbowen
Date: Wed, 3 May 2023 18:42:23 +0800
Subject: [PATCH 089/404] [KYUUBI #4784] Use nodejs v18 in CI builds and maven
web-ui builds
### _Why are the changes needed?_
- Node.js v18 has been the active LTS release since October 2022, while v16 has entered maintenance mode and is no longer the active release
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
This patch had conflicts when merged, resolved by
Committer: Cheng Pan
Closes #4784 from bowenliang123/nodejs18.
Closes #4784
483c3ac56 [liangbowen] use nodejs v18 in ci builds and maven builds
Authored-by: liangbowen
Signed-off-by: Cheng Pan
---
.github/workflows/style.yml | 2 +-
.github/workflows/web-ui.yml | 2 +-
pom.xml | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index e2e6fd70e0b..479b74e3632 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -89,7 +89,7 @@ jobs:
- name: setup npm
uses: actions/setup-node@v3
with:
- node-version: 16
+ node-version: 18
- name: Web UI Style with node
run: |
cd ./kyuubi-server/web-ui
diff --git a/.github/workflows/web-ui.yml b/.github/workflows/web-ui.yml
index 65fb4759453..13d072321cb 100644
--- a/.github/workflows/web-ui.yml
+++ b/.github/workflows/web-ui.yml
@@ -24,7 +24,7 @@ jobs:
- name: setup npm
uses: actions/setup-node@v3
with:
- node-version: 16
+ node-version: 18
- name: npm run coverage & build
run: |
cd ./kyuubi-server/web-ui
diff --git a/pom.xml b/pom.xml
index 4ce11caf5ed..af56eb1672f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -206,7 +206,7 @@
true
- v16.19.1
+ v18.16.0
v8.4.0
From 66de0ad8a040b809908bfb44e70502650e736de0 Mon Sep 17 00:00:00 2001
From: fwang12
Date: Wed, 3 May 2023 20:24:59 +0800
Subject: [PATCH 090/404] [KYUUBI #4780] Get engine application info with
interval to prevent frequent calls to the resource manager
### _Why are the changes needed?_
To prevent frequent calls to the resource manager: instead of querying the engine application info on every iteration of the launch loop, poll it at roughly one-second intervals while waiting for the engine to register.
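Conceptually, the new loop behaves like the sketch below — a minimal, self-contained model of interval polling, not the real `EngineRef` code (all names, including the stubbed lookup, are illustrative):

```scala
import java.util.concurrent.TimeUnit

object PollWithInterval {
  // Poll `lookup` until it yields a value or the deadline passes; the first
  // call happens immediately, later calls are spaced `intervalSec` apart so
  // the backing service (here: the resource manager) is not hammered.
  def pollUntilDefined[T](lookup: () => Option[T], intervalSec: Long, timeoutMs: Long): Option[T] = {
    val deadline = System.currentTimeMillis() + timeoutMs
    var result = lookup()
    while (result.isEmpty && System.currentTimeMillis() < deadline) {
      TimeUnit.SECONDS.sleep(intervalSec)
      result = lookup()
    }
    result
  }

  def main(args: Array[String]): Unit = {
    val start = System.currentTimeMillis()
    // Stubbed "application info" that only becomes available after ~3 seconds.
    val appState = pollUntilDefined(
      () => if (System.currentTimeMillis() - start > 3000) Some("RUNNING") else None,
      intervalSec = 1,
      timeoutMs = 10000)
    println(appState) // Some(RUNNING)
  }
}
```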
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4780 from turboFei/engine_ref.
Closes #4780
09f67c699 [fwang12] re-order
88c1cb33c [fwang12] sleep
Authored-by: fwang12
Signed-off-by: Cheng Pan
---
.../org/apache/kyuubi/engine/EngineRef.scala | 39 ++++++++++++-------
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala
index b2b3ce9096a..765f3694965 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala
@@ -206,10 +206,11 @@ private[kyuubi] class EngineRef(
builder.validateConf
val process = builder.start
var exitValue: Option[Int] = None
+ var lastApplicationInfo: Option[ApplicationInfo] = None
while (engineRef.isEmpty) {
if (exitValue.isEmpty && process.waitFor(1, TimeUnit.SECONDS)) {
exitValue = Some(process.exitValue())
- if (exitValue.get != 0) {
+ if (exitValue != Some(0)) {
val error = builder.getError
MetricsSystem.tracing { ms =>
ms.incCount(MetricRegistry.name(ENGINE_FAIL, appUser))
@@ -219,14 +220,31 @@ private[kyuubi] class EngineRef(
}
}
+ if (started + timeout <= System.currentTimeMillis()) {
+ val killMessage = engineManager.killApplication(builder.clusterManager(), engineRefId)
+ process.destroyForcibly()
+ MetricsSystem.tracing(_.incCount(MetricRegistry.name(ENGINE_TIMEOUT, appUser)))
+ throw KyuubiSQLException(
+ s"Timeout($timeout ms, you can modify ${ENGINE_INIT_TIMEOUT.key} to change it) to" +
+ s" launched $engineType engine with $redactedCmd. $killMessage",
+ builder.getError)
+ }
+ engineRef = discoveryClient.getEngineByRefId(engineSpace, engineRefId)
+
// even the submit process succeeds, the application might meet failure when initializing,
// check the engine application state from engine manager and fast fail on engine terminate
- if (exitValue == Some(0)) {
+ if (engineRef.isEmpty && exitValue == Some(0)) {
Option(engineManager).foreach { engineMgr =>
- engineMgr.getApplicationInfo(
+ if (lastApplicationInfo.isDefined) {
+ TimeUnit.SECONDS.sleep(1)
+ }
+
+ val applicationInfo = engineMgr.getApplicationInfo(
builder.clusterManager(),
engineRefId,
- Some(started)).foreach { appInfo =>
+ Some(started))
+
+ applicationInfo.foreach { appInfo =>
if (ApplicationState.isTerminated(appInfo.state)) {
MetricsSystem.tracing { ms =>
ms.incCount(MetricRegistry.name(ENGINE_FAIL, appUser))
@@ -240,19 +258,10 @@ private[kyuubi] class EngineRef(
builder.getError)
}
}
- }
- }
- if (started + timeout <= System.currentTimeMillis()) {
- val killMessage = engineManager.killApplication(builder.clusterManager(), engineRefId)
- process.destroyForcibly()
- MetricsSystem.tracing(_.incCount(MetricRegistry.name(ENGINE_TIMEOUT, appUser)))
- throw KyuubiSQLException(
- s"Timeout($timeout ms, you can modify ${ENGINE_INIT_TIMEOUT.key} to change it) to" +
- s" launched $engineType engine with $redactedCmd. $killMessage",
- builder.getError)
+ lastApplicationInfo = applicationInfo
+ }
}
- engineRef = discoveryClient.getEngineByRefId(engineSpace, engineRefId)
}
engineRef.get
} finally {
From dcb444e2d8e44b30eb6cad74ae8b75b2e54b52da Mon Sep 17 00:00:00 2001
From: Paul Lin
Date: Wed, 3 May 2023 21:51:21 +0800
Subject: [PATCH 091/404] [KYUUBI #4495] Support Flink job management
statements
### _Why are the changes needed?_
Support Flink job management statements, e.g. `SHOW JOBS` and `STOP JOB '<job id>' [WITH SAVEPOINT]`, on Flink 1.17 and above.
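A hedged usage sketch of these statements over Kyuubi's JDBC endpoint; the connection URL and job id are placeholders, the Kyuubi Hive JDBC driver is assumed to be on the classpath, and `STOP JOB` requires Flink 1.17+ as the version guard in the tests below reflects:

```scala
import java.sql.DriverManager

object FlinkJobStatements {
  def main(args: Array[String]): Unit = {
    // Placeholder URL; point it at a Kyuubi server backed by a Flink engine.
    val conn = DriverManager.getConnection("jdbc:hive2://localhost:10009/")
    val stmt = conn.createStatement()
    // Result columns per the tests below: "job id", "job name", "status", "start time".
    val jobs = stmt.executeQuery("SHOW JOBS")
    while (jobs.next()) {
      println(s"${jobs.getString("job id")} ${jobs.getString("job name")} ${jobs.getString("status")}")
    }
    // Stop a job, optionally taking a savepoint first (Flink 1.17+).
    val stop = stmt.executeQuery("STOP JOB '<job-id>' WITH SAVEPOINT")
    if (stop.next()) println(stop.getString("savepoint path"))
    conn.close()
  }
}
```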
### _How was this patch tested?_
- [x] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4774 from link3280/KYUUBI-4495.
Closes #4495
a4aaebcbb [Paul Lin] [KYUUBI #4495] Adjust the order of tests
225a6cdbd [Paul Lin] [KYUUBI #4495] Increase the number of taskmanagers in the mini cluster
67935ac24 [Paul Lin] [KYUUBI #4495] Wait jobs to get ready for show job statements
9c4ce1d6e [Paul Lin] [KYUUBI #4495] Fix show jobs assertion error
ab3113cab [Paul Lin] [KYUUBI #4495] Support Flink job management statements
Authored-by: Paul Lin
Signed-off-by: Cheng Pan
---
.../flink/WithFlinkSQLEngineLocal.scala | 1 +
.../engine/flink/WithFlinkTestResources.scala | 5 +-
.../flink/operation/FlinkOperationSuite.scala | 70 ++++++++++++++++++-
3 files changed, 73 insertions(+), 3 deletions(-)
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala
index 0001f31aebb..92c1bcd83fc 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkSQLEngineLocal.scala
@@ -184,6 +184,7 @@ trait WithFlinkSQLEngineLocal extends KyuubiFunSuite with WithFlinkTestResources
val cfg = new MiniClusterConfiguration.Builder()
.setConfiguration(flinkConfig)
.setNumSlotsPerTaskManager(1)
+ .setNumTaskManagers(2)
.build
miniCluster = new MiniCluster(cfg)
miniCluster.start()
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala
index 3ea02774eb3..3b1d65cb233 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/WithFlinkTestResources.scala
@@ -41,6 +41,9 @@ trait WithFlinkTestResources {
GENERATED_UDF_CLASS,
GENERATED_UDF_CODE)
+ protected val savepointDir: File = Utils.createTempDir("savepoints").toFile
+
protected val testExtraConf: Map[String, String] = Map(
- "flink.pipeline.name" -> "test-job")
+ "flink.pipeline.name" -> "test-job",
+ "flink.state.savepoints.dir" -> savepointDir.toURI.toString)
}
diff --git a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala
index 00e26c528f7..39d17aa7b1d 100644
--- a/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala
+++ b/externals/kyuubi-flink-sql-engine/src/test/scala/org/apache/kyuubi/engine/flink/operation/FlinkOperationSuite.scala
@@ -17,6 +17,7 @@
package org.apache.kyuubi.engine.flink.operation
+import java.nio.file.Paths
import java.sql.DatabaseMetaData
import java.util.UUID
@@ -33,6 +34,7 @@ import org.apache.kyuubi.engine.flink.{FlinkEngineUtils, WithFlinkTestResources}
import org.apache.kyuubi.engine.flink.result.Constants
import org.apache.kyuubi.engine.flink.util.TestUserClassLoaderJar
import org.apache.kyuubi.jdbc.hive.KyuubiStatement
+import org.apache.kyuubi.jdbc.hive.common.TimestampTZ
import org.apache.kyuubi.operation.HiveJDBCTestHelper
import org.apache.kyuubi.operation.meta.ResultSetSchemaConstant._
@@ -632,6 +634,62 @@ abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTest
}
}
+ test("execute statement - show/stop jobs") {
+ if (FlinkEngineUtils.isFlinkVersionAtLeast("1.17")) {
+ withSessionConf()(Map(ENGINE_FLINK_MAX_ROWS.key -> "10"))(Map.empty) {
+ withMultipleConnectionJdbcStatement()({ statement =>
+ statement.executeQuery(
+ "create table tbl_a (a int) with (" +
+ "'connector' = 'datagen', " +
+ "'rows-per-second'='10')")
+ statement.executeQuery("create table tbl_b (a int) with ('connector' = 'blackhole')")
+ val insertResult1 = statement.executeQuery("insert into tbl_b select * from tbl_a")
+ assert(insertResult1.next())
+ val jobId1 = insertResult1.getString(1)
+
+ Thread.sleep(5000)
+
+ val showResult = statement.executeQuery("show jobs")
+ val metadata = showResult.getMetaData
+ assert(metadata.getColumnName(1) === "job id")
+ assert(metadata.getColumnType(1) === java.sql.Types.VARCHAR)
+ assert(metadata.getColumnName(2) === "job name")
+ assert(metadata.getColumnType(2) === java.sql.Types.VARCHAR)
+ assert(metadata.getColumnName(3) === "status")
+ assert(metadata.getColumnType(3) === java.sql.Types.VARCHAR)
+ assert(metadata.getColumnName(4) === "start time")
+ assert(metadata.getColumnType(4) === java.sql.Types.OTHER)
+
+ var isFound = false
+ while (showResult.next()) {
+ if (showResult.getString(1) === jobId1) {
+ isFound = true
+ assert(showResult.getString(2) === "test-job")
+ assert(showResult.getString(3) === "RUNNING")
+ assert(showResult.getObject(4).isInstanceOf[TimestampTZ])
+ }
+ }
+ assert(isFound)
+
+ val stopResult1 = statement.executeQuery(s"stop job '$jobId1'")
+ assert(stopResult1.next())
+ assert(stopResult1.getString(1) === "OK")
+
+ val selectResult = statement.executeQuery("select * from tbl_a")
+ val jobId2 = statement.asInstanceOf[KyuubiStatement].getQueryId
+ assert(jobId2 !== null)
+ while (!selectResult.next()) {
+ Thread.sleep(1000L)
+ }
+ val stopResult2 = statement.executeQuery(s"stop job '$jobId2' with savepoint")
+ assert(stopResult2.getMetaData.getColumnName(1).equals("savepoint path"))
+ assert(stopResult2.next())
+ assert(Paths.get(stopResult2.getString(1)).getFileName.toString.startsWith("savepoint-"))
+ })
+ }
+ }
+ }
+
test("execute statement - select column name with dots") {
withJdbcStatement() { statement =>
val resultSet = statement.executeQuery("select 'tmp.hello'")
@@ -994,7 +1052,14 @@ abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTest
assert(metadata.getColumnName(1) === "job id")
assert(metadata.getColumnType(1) === java.sql.Types.VARCHAR)
assert(resultSet.next())
- assert(resultSet.getString(1).length == 32)
+ val jobId = resultSet.getString(1)
+ assert(jobId.length == 32)
+
+ if (FlinkEngineUtils.isFlinkVersionAtLeast("1.17")) {
+ val stopResult = statement.executeQuery(s"stop job '$jobId'")
+ assert(stopResult.next())
+ assert(stopResult.getString(1) === "OK")
+ }
})
}
@@ -1072,7 +1137,8 @@ abstract class FlinkOperationSuite extends HiveJDBCTestHelper with WithFlinkTest
test("ensure result max rows") {
withSessionConf()(Map(ENGINE_FLINK_MAX_ROWS.key -> "200"))(Map.empty) {
withJdbcStatement() { statement =>
- statement.execute("create table tbl_src (a bigint) with ('connector' = 'datagen')")
+ statement.execute("create table tbl_src (a bigint) with (" +
+ "'connector' = 'datagen', 'number-of-rows' = '1000')")
val resultSet = statement.executeQuery(s"select a from tbl_src")
var rows = 0
while (resultSet.next()) {
From 07e26a85e0f4e7722a7c37d195e660751efee178 Mon Sep 17 00:00:00 2001
From: fwang12
Date: Fri, 5 May 2023 10:07:22 +0800
Subject: [PATCH 092/404] [KYUUBI #4786] Support yarn-client and yarn-cluster
for YarnApplicationOperation
### _Why are the changes needed?_
The Spark master might be `yarn-client` or `yarn-cluster` rather than plain `yarn`, so `isSupported` should match the cluster manager by prefix.
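The heart of the change as a self-contained sketch (the real `isSupported` additionally requires a live `yarnClient`):

```scala
import java.util.Locale

object YarnMasterCheck {
  // Any master string starting with "yarn" is now considered supported,
  // covering "yarn", "yarn-client" and "yarn-cluster" case-insensitively.
  def isYarnMaster(clusterManager: Option[String]): Boolean =
    clusterManager.exists(_.toLowerCase(Locale.ROOT).startsWith("yarn"))

  def main(args: Array[String]): Unit = {
    assert(isYarnMaster(Some("yarn")))
    assert(isYarnMaster(Some("yarn-client")))
    assert(isYarnMaster(Some("YARN-CLUSTER")))
    assert(!isYarnMaster(Some("k8s://https://host:6443")))
    assert(!isYarnMaster(None))
  }
}
```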
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4786 from turboFei/yarn_client_cluster.
Closes #4786
accab6b81 [fwang12] Support yarn-client and yarn-cluster
Authored-by: fwang12
Signed-off-by: fwang12
---
.../apache/kyuubi/engine/KubernetesApplicationOperation.scala | 4 ++--
.../org/apache/kyuubi/engine/YarnApplicationOperation.scala | 4 +++-
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala
index 83792f52f79..c569dc9dc56 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala
@@ -17,6 +17,7 @@
package org.apache.kyuubi.engine
+import java.util.Locale
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}
import com.google.common.cache.{Cache, CacheBuilder, RemovalNotification}
@@ -74,8 +75,7 @@ class KubernetesApplicationOperation extends ApplicationOperation with Logging {
override def isSupported(clusterManager: Option[String]): Boolean = {
// TODO add deploy mode to check whether is supported
- kubernetesClient != null && clusterManager.nonEmpty &&
- clusterManager.get.toLowerCase.startsWith("k8s")
+ kubernetesClient != null && clusterManager.exists(_.toLowerCase(Locale.ROOT).startsWith("k8s"))
}
override def killApplicationByTag(tag: String): KillResponse = {
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala
index 446314208a9..ea2bf6dcd37 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala
@@ -17,6 +17,8 @@
package org.apache.kyuubi.engine
+import java.util.Locale
+
import scala.collection.JavaConverters._
import org.apache.hadoop.yarn.api.records.{FinalApplicationStatus, YarnApplicationState}
@@ -46,7 +48,7 @@ class YarnApplicationOperation extends ApplicationOperation with Logging {
}
override def isSupported(clusterManager: Option[String]): Boolean = {
- yarnClient != null && clusterManager.nonEmpty && "yarn".equalsIgnoreCase(clusterManager.get)
+ yarnClient != null && clusterManager.exists(_.toLowerCase(Locale.ROOT).startsWith("yarn"))
}
override def killApplicationByTag(tag: String): KillResponse = {
From d7417ce44fd38daf75352acaaac3cac2e16cb5b8 Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Fri, 5 May 2023 14:15:54 +0800
Subject: [PATCH 093/404] [KYUUBI #4791] Add helper method to simplify REST
enabled judgment
### _Why are the changes needed?_
The REST-enabled judgment will be used in other places, e.g. the batch API v2 that is under development.
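A simplified model of the helper, with stand-in names; the actual implementation reads `FRONTEND_PROTOCOLS` from `KyuubiConf`, as the diff below shows:

```scala
// Stand-ins for KyuubiConf and MetadataManager, for illustration only.
final case class Conf(frontendProtocols: Seq[String]) {
  def isRESTEnabled: Boolean = frontendProtocols.contains("REST")
}

object RestEnabledExample {
  def main(args: Array[String]): Unit = {
    val conf = Conf(Seq("THRIFT_BINARY", "REST"))
    // Call sites collapse from protocol-list plumbing to a single predicate:
    val metadataManager: Option[String] =
      if (conf.isRESTEnabled) Some("MetadataManager") else None
    println(metadataManager) // Some(MetadataManager)
  }
}
```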
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
Closes #4791 from pan3793/conf.
Closes #4791
264566569 [Cheng Pan] Add helper method to simplify REST enabled judgement
Authored-by: Cheng Pan
Signed-off-by: Cheng Pan
---
.../org/apache/kyuubi/config/KyuubiConf.scala | 2 ++
.../kyuubi/session/KyuubiSessionManager.scala | 15 +++++----------
2 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
index e90dbe2392f..63a9ea648db 100644
--- a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
+++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
@@ -188,6 +188,8 @@ case class KyuubiConf(loadSysDefault: Boolean = true) extends Logging {
s"and may be removed in the future. $comment")
}
}
+
+ def isRESTEnabled: Boolean = get(FRONTEND_PROTOCOLS).contains(FrontendProtocols.REST.toString)
}
/**
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala
index 73248cd5632..b0ed144a5d1 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala
@@ -47,16 +47,11 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) {
val operationManager = new KyuubiOperationManager()
val credentialsManager = new HadoopCredentialsManager()
val applicationManager = new KyuubiApplicationManager()
- private lazy val metadataManager: Option[MetadataManager] = {
- // Currently, the metadata manager is used by the REST frontend which provides batch job APIs,
- // so we initialize it only when Kyuubi starts with the REST frontend.
- if (conf.get(FRONTEND_PROTOCOLS).map(FrontendProtocols.withName)
- .contains(FrontendProtocols.REST)) {
- Option(new MetadataManager())
- } else {
- None
- }
- }
+
+ // Currently, the metadata manager is used by the REST frontend which provides batch job APIs,
+ // so we initialize it only when Kyuubi starts with the REST frontend.
+ lazy val metadataManager: Option[MetadataManager] =
+ if (conf.isRESTEnabled) Some(new MetadataManager()) else None
// lazy is required for plugins since the conf is null when this class initialization
lazy val sessionConfAdvisor: SessionConfAdvisor = PluginLoader.loadSessionConfAdvisor(conf)
From 3108c8e1a599d07e9637fb31cd09eba0d6bb0d56 Mon Sep 17 00:00:00 2001
From: huzk <1040080742@qq.com>
Date: Sat, 6 May 2023 14:10:39 +0800
Subject: [PATCH 094/404] [KYUUBI #4796] Expose JVM attributes to metrics
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
### _Why are the changes needed?_
related issue: #2526
I just built a Grafana dashboard for Kyuubi, but I could not get a metric for the Kyuubi server start time.
![image](https://user-images.githubusercontent.com/18548053/236595754-b839e608-a087-43e6-8c31-9b6639e94138.png)
We can add JvmAttributeGaugeSet to expose the uptime metrics of Kyuubi.
![image](https://user-images.githubusercontent.com/18548053/236595818-d0b6958d-f660-403f-8f72-a1ef6f679383.png)
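A minimal sketch of what the one-line change does, assuming Dropwizard Metrics (`metrics-core` plus `metrics-jvm`) on the classpath; the `"kyuubi.jvm"` prefix stands in for the new `MetricsConstants.JVM` constant:

```scala
import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.jvm.JvmAttributeGaugeSet

object JvmMetricsDemo extends App {
  val registry = new MetricRegistry
  // Registers the JVM name, vendor, and uptime gauges under the "kyuubi.jvm" prefix,
  // which is what registry.registerAll(MetricsConstants.JVM, new JvmAttributeGaugeSet) does.
  registry.registerAll("kyuubi.jvm", new JvmAttributeGaugeSet)
  registry.getGauges.keySet.forEach(k => println(k)) // kyuubi.jvm.uptime, kyuubi.jvm.name, ...
}
```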
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4796 from Kyofin/master.
Closes #4796
ba1de910c [Cheng Pan] Update kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsSystem.scala
e2f15a6ee [Cheng Pan] Update kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConstants.scala
665552028 [huzk] add jvm metrics
Lead-authored-by: huzk <1040080742@qq.com>
Co-authored-by: Cheng Pan
Signed-off-by: Cheng Pan
---
.../main/scala/org/apache/kyuubi/metrics/MetricsConstants.scala | 1 +
.../src/main/scala/org/apache/kyuubi/metrics/MetricsSystem.scala | 1 +
2 files changed, 2 insertions(+)
diff --git a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConstants.scala b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConstants.scala
index e97fd28ea25..f615467f3f0 100644
--- a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConstants.scala
+++ b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsConstants.scala
@@ -26,6 +26,7 @@ object MetricsConstants {
final val BUFFER_POOL: String = KYUUBI + "buffer_pool"
final val THREAD_STATE: String = KYUUBI + "thread_state"
final val CLASS_LOADING: String = KYUUBI + "class_loading"
+ final val JVM: String = KYUUBI + "jvm"
final val EXEC_POOL_ALIVE: String = KYUUBI + "exec.pool.threads.alive"
final val EXEC_POOL_ACTIVE: String = KYUUBI + "exec.pool.threads.active"
diff --git a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsSystem.scala b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsSystem.scala
index 99da1f1b06e..26344ca56a3 100644
--- a/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsSystem.scala
+++ b/kyuubi-metrics/src/main/scala/org/apache/kyuubi/metrics/MetricsSystem.scala
@@ -67,6 +67,7 @@ class MetricsSystem extends CompositeService("MetricsSystem") {
}
override def initialize(conf: KyuubiConf): Unit = synchronized {
+ registry.registerAll(MetricsConstants.JVM, new JvmAttributeGaugeSet)
registry.registerAll(MetricsConstants.GC_METRIC, new GarbageCollectorMetricSet)
registry.registerAll(MetricsConstants.MEMORY_USAGE, new MemoryUsageGaugeSet)
registry.registerAll(
From ae3b81395c9a5805d226d56671c2acdbbf365a31 Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Sun, 7 May 2023 19:31:45 +0800
Subject: [PATCH 095/404] [KYUUBI #4798] Allows BatchJobSubmission to run in
sync mode
### _Why are the changes needed?_
Currently, BatchJobSubmission is only allowed to run in async mode. This PR makes `shouldRunAsync` configurable and allows BatchJobSubmission to run in sync mode. (To minimize the change, in sync mode the real submission and monitoring still happen on the exec pool; the BatchJobSubmission just blocks until the batch is finished.)
This PR also refactors the constructor parameters of `KyuubiBatchSessionImpl` and unwraps the BatchRequest to fit the Batch V2 design.
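Conceptually, the sync-mode trick looks like this simplified, self-contained sketch (not the real `BatchJobSubmission`):

```scala
import java.util.concurrent.Executors

// Work is always submitted to the exec pool; sync mode merely blocks
// on the returned handle until the job reaches a terminal state.
class Submission(shouldRunAsync: Boolean) {
  private val execPool = Executors.newSingleThreadExecutor()

  def run(job: Runnable): Unit = {
    val backgroundHandle = execPool.submit(job) // submission/monitoring on the pool
    if (!shouldRunAsync) backgroundHandle.get() // sync mode: wait for completion
  }

  def shutdown(): Unit = execPool.shutdown()
}

object SyncModeDemo extends App {
  val submission = new Submission(shouldRunAsync = false)
  submission.run(() => println("batch finished"))
  submission.shutdown()
}
```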
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4798 from pan3793/batch-sync.
Closes #4798
38eee2708 [Cheng Pan] Allows BatchJobSubmission run in sync mode
Authored-by: Cheng Pan
Signed-off-by: Cheng Pan
---
.../kyuubi/operation/BatchJobSubmission.scala | 52 +++++++---------
.../operation/KyuubiOperationManager.scala | 6 +-
.../session/KyuubiBatchSessionImpl.scala | 57 ++++++++---------
.../kyuubi/session/KyuubiSessionManager.scala | 62 +++++++++++++------
4 files changed, 98 insertions(+), 79 deletions(-)
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala
index 702a9a917a3..e77416d31af 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala
@@ -58,12 +58,11 @@ class BatchJobSubmission(
className: String,
batchConf: Map[String, String],
batchArgs: Seq[String],
- recoveryMetadata: Option[Metadata])
+ recoveryMetadata: Option[Metadata],
+ override val shouldRunAsync: Boolean)
extends KyuubiApplicationOperation(session) {
import BatchJobSubmission._
- override def shouldRunAsync: Boolean = true
-
private val _operationLog = OperationLog.createOperationLog(session, getHandle)
private val applicationManager = session.sessionManager.applicationManager
@@ -131,17 +130,10 @@ class BatchJobSubmission(
session.sessionConf.get(KyuubiConf.BATCH_APPLICATION_STARVATION_TIMEOUT)
private def updateBatchMetadata(): Unit = {
- val endTime =
- if (isTerminalState(state)) {
- lastAccessTime
- } else {
- 0L
- }
+ val endTime = if (isTerminalState(state)) lastAccessTime else 0L
- if (isTerminalState(state)) {
- if (_applicationInfo.isEmpty) {
- _applicationInfo = Some(ApplicationInfo.NOT_FOUND)
- }
+ if (isTerminalState(state) && _applicationInfo.isEmpty) {
+ _applicationInfo = Some(ApplicationInfo.NOT_FOUND)
}
_applicationInfo.foreach { appInfo =>
@@ -187,27 +179,24 @@ class BatchJobSubmission(
override protected def runInternal(): Unit = session.handleSessionException {
val asyncOperation: Runnable = () => {
try {
- if (recoveryMetadata.exists(_.peerInstanceClosed)) {
- setState(OperationState.CANCELED)
- } else {
- // If it is in recovery mode, only re-submit batch job if previous state is PENDING and
- // fail to fetch the status including appId from resource manager. Otherwise, monitor the
- // submitted batch application.
- recoveryMetadata.map { metadata =>
- if (metadata.state == OperationState.PENDING.toString) {
- _applicationInfo = currentApplicationInfo()
- applicationId(_applicationInfo) match {
- case Some(appId) => monitorBatchJob(appId)
- case None => submitAndMonitorBatchJob()
- }
- } else {
- monitorBatchJob(metadata.engineId)
+ recoveryMetadata match {
+ case Some(metadata) if metadata.peerInstanceClosed =>
+ setState(OperationState.CANCELED)
+ case Some(metadata) if metadata.state == OperationState.PENDING.toString =>
+ // In recovery mode, only submit batch job when previous state is PENDING
+ // and fail to fetch the status including appId from resource manager.
+ // Otherwise, monitor the submitted batch application.
+ _applicationInfo = currentApplicationInfo()
+ applicationId(_applicationInfo) match {
+ case Some(appId) => monitorBatchJob(appId)
+ case None => submitAndMonitorBatchJob()
}
- }.getOrElse {
+ case Some(metadata) =>
+ monitorBatchJob(metadata.engineId)
+ case None =>
submitAndMonitorBatchJob()
- }
- setStateIfNotCanceled(OperationState.FINISHED)
}
+ setStateIfNotCanceled(OperationState.FINISHED)
} catch {
onError()
} finally {
@@ -225,6 +214,7 @@ class BatchJobSubmission(
updateBatchMetadata()
}
}
+ if (!shouldRunAsync) getBackgroundHandle.get()
}
private def submitAndMonitorBatchJob(): Unit = {
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperationManager.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperationManager.scala
index dd4889653cf..6846d0316af 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperationManager.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/KyuubiOperationManager.scala
@@ -81,7 +81,8 @@ class KyuubiOperationManager private (name: String) extends OperationManager(nam
className: String,
batchConf: Map[String, String],
batchArgs: Seq[String],
- recoveryMetadata: Option[Metadata]): BatchJobSubmission = {
+ recoveryMetadata: Option[Metadata],
+ shouldRunAsync: Boolean): BatchJobSubmission = {
val operation = new BatchJobSubmission(
session,
batchType,
@@ -90,7 +91,8 @@ class KyuubiOperationManager private (name: String) extends OperationManager(nam
className,
batchConf,
batchArgs,
- recoveryMetadata)
+ recoveryMetadata,
+ shouldRunAsync)
addOperation(operation)
operation
}
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiBatchSessionImpl.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiBatchSessionImpl.scala
index 94859a08cf8..ba204682985 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiBatchSessionImpl.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiBatchSessionImpl.scala
@@ -21,7 +21,6 @@ import scala.collection.JavaConverters._
import org.apache.hive.service.rpc.thrift.TProtocolVersion
-import org.apache.kyuubi.client.api.v1.dto.BatchRequest
import org.apache.kyuubi.client.util.BatchUtils._
import org.apache.kyuubi.config.{KyuubiConf, KyuubiReservedKeys}
import org.apache.kyuubi.engine.KyuubiApplicationManager
@@ -38,8 +37,14 @@ class KyuubiBatchSessionImpl(
conf: Map[String, String],
override val sessionManager: KyuubiSessionManager,
val sessionConf: KyuubiConf,
- batchRequest: BatchRequest,
- recoveryMetadata: Option[Metadata] = None)
+ batchType: String,
+ batchName: Option[String],
+ resource: String,
+ className: String,
+ batchConf: Map[String, String],
+ batchArgs: Seq[String],
+ recoveryMetadata: Option[Metadata] = None,
+ shouldRunAsync: Boolean)
extends KyuubiSession(
TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1,
user,
@@ -68,42 +73,41 @@ class KyuubiBatchSessionImpl(
override val sessionIdleTimeoutThreshold: Long =
sessionManager.getConf.get(KyuubiConf.BATCH_SESSION_IDLE_TIMEOUT)
- override val normalizedConf: Map[String, String] = {
- sessionConf.getBatchConf(batchRequest.getBatchType) ++
- sessionManager.validateBatchConf(batchRequest.getConf.asScala.toMap)
- }
+ override val normalizedConf: Map[String, String] =
+ sessionConf.getBatchConf(batchType) ++ sessionManager.validateBatchConf(batchConf)
- private val optimizedConf: Map[String, String] = {
+ val optimizedConf: Map[String, String] = {
val confOverlay = sessionManager.sessionConfAdvisor.getConfOverlay(
user,
normalizedConf.asJava)
if (confOverlay != null) {
val overlayConf = new KyuubiConf(false)
confOverlay.asScala.foreach { case (k, v) => overlayConf.set(k, v) }
- normalizedConf ++ overlayConf.getBatchConf(batchRequest.getBatchType)
+ normalizedConf ++ overlayConf.getBatchConf(batchType)
} else {
warn(s"the server plugin return null value for user: $user, ignore it")
normalizedConf
}
}
- override lazy val name: Option[String] = Option(batchRequest.getName).orElse(
- optimizedConf.get(KyuubiConf.SESSION_NAME.key))
+ override lazy val name: Option[String] =
+ batchName.filterNot(_.trim.isEmpty).orElse(optimizedConf.get(KyuubiConf.SESSION_NAME.key))
// whether the resource file is from uploading
- private[kyuubi] val isResourceUploaded: Boolean = batchRequest.getConf
- .getOrDefault(KyuubiReservedKeys.KYUUBI_BATCH_RESOURCE_UPLOADED_KEY, "false").toBoolean
+ private[kyuubi] val isResourceUploaded: Boolean =
+ batchConf.getOrElse(KyuubiReservedKeys.KYUUBI_BATCH_RESOURCE_UPLOADED_KEY, "false").toBoolean
private[kyuubi] lazy val batchJobSubmissionOp = sessionManager.operationManager
.newBatchJobSubmissionOperation(
this,
- batchRequest.getBatchType,
+ batchType,
name.orNull,
- batchRequest.getResource,
- batchRequest.getClassName,
+ resource,
+ className,
optimizedConf,
- batchRequest.getArgs.asScala,
- recoveryMetadata)
+ batchArgs,
+ recoveryMetadata,
+ shouldRunAsync)
private def waitMetadataRequestsRetryCompletion(): Unit = {
val batchId = batchJobSubmissionOp.batchId
@@ -127,14 +131,11 @@ class KyuubiBatchSessionImpl(
override def checkSessionAccessPathURIs(): Unit = {
KyuubiApplicationManager.checkApplicationAccessPaths(
- batchRequest.getBatchType,
+ batchType,
optimizedConf,
sessionManager.getConf)
- if (batchRequest.getResource != SparkProcessBuilder.INTERNAL_RESOURCE
- && !isResourceUploaded) {
- KyuubiApplicationManager.checkApplicationAccessPath(
- batchRequest.getResource,
- sessionManager.getConf)
+ if (resource != SparkProcessBuilder.INTERNAL_RESOURCE && !isResourceUploaded) {
+ KyuubiApplicationManager.checkApplicationAccessPath(resource, sessionManager.getConf)
}
}
@@ -150,13 +151,13 @@ class KyuubiBatchSessionImpl(
ipAddress = ipAddress,
kyuubiInstance = connectionUrl,
state = OperationState.PENDING.toString,
- resource = batchRequest.getResource,
- className = batchRequest.getClassName,
+ resource = resource,
+ className = className,
requestName = name.orNull,
requestConf = optimizedConf,
- requestArgs = batchRequest.getArgs.asScala,
+ requestArgs = batchArgs,
createTime = createTime,
- engineType = batchRequest.getBatchType,
+ engineType = batchType,
clusterManager = batchJobSubmissionOp.builder.clusterManager())
// there is a chance that operation failed w/ duplicated key error
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala
index b0ed144a5d1..0ef3f1ac104 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionManager.scala
@@ -124,23 +124,38 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) {
}
}
- private def createBatchSession(
+ // scalastyle:off
+ def createBatchSession(
user: String,
password: String,
ipAddress: String,
conf: Map[String, String],
- batchRequest: BatchRequest,
- recoveryMetadata: Option[Metadata] = None): KyuubiBatchSessionImpl = {
+ batchType: String,
+ batchName: Option[String],
+ resource: String,
+ className: String,
+ batchConf: Map[String, String],
+ batchArgs: Seq[String],
+ recoveryMetadata: Option[Metadata] = None,
+ shouldRunAsync: Boolean): KyuubiBatchSessionImpl = {
+ // scalastyle:on
val username = Option(user).filter(_.nonEmpty).getOrElse("anonymous")
+ val sessionConf = this.getConf.getUserDefaults(user)
new KyuubiBatchSessionImpl(
username,
password,
ipAddress,
conf,
this,
- this.getConf.getUserDefaults(user),
- batchRequest,
- recoveryMetadata)
+ sessionConf,
+ batchType,
+ batchName,
+ resource,
+ className,
+ batchConf,
+ batchArgs,
+ recoveryMetadata,
+ shouldRunAsync)
}
private[kyuubi] def openBatchSession(batchSession: KyuubiBatchSessionImpl): SessionHandle = {
@@ -178,8 +193,21 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) {
password: String,
ipAddress: String,
conf: Map[String, String],
- batchRequest: BatchRequest): SessionHandle = {
- val batchSession = createBatchSession(user, password, ipAddress, conf, batchRequest)
+ batchRequest: BatchRequest,
+ shouldRunAsync: Boolean = true): SessionHandle = {
+ val batchSession = createBatchSession(
+ user,
+ password,
+ ipAddress,
+ conf,
+ batchRequest.getBatchType,
+ Option(batchRequest.getName),
+ batchRequest.getResource,
+ batchRequest.getClassName,
+ batchRequest.getConf.asScala.toMap,
+ batchRequest.getArgs.asScala,
+ None,
+ shouldRunAsync)
openBatchSession(batchSession)
}
@@ -246,21 +274,19 @@ class KyuubiSessionManager private (name: String) extends SessionManager(name) {
kyuubiInstance,
0,
Int.MaxValue).map { metadata =>
- val batchRequest = new BatchRequest(
- metadata.engineType,
- metadata.resource,
- metadata.className,
- metadata.requestName,
- metadata.requestConf.asJava,
- metadata.requestArgs.asJava)
-
createBatchSession(
metadata.username,
"anonymous",
metadata.ipAddress,
metadata.requestConf,
- batchRequest,
- Some(metadata))
+ metadata.engineType,
+ Option(metadata.requestName),
+ metadata.resource,
+ metadata.className,
+ metadata.requestConf,
+ metadata.requestArgs,
+ Some(metadata),
+ shouldRunAsync = true)
}).getOrElse(Seq.empty)
}
}
From f8109b0ad72bddd0fd65154a432dfed2dd575939 Mon Sep 17 00:00:00 2001
From: Kent Yao
Date: Mon, 8 May 2023 13:35:55 +0800
Subject: [PATCH 096/404] [KYUUBI #4800] Update readthedocs.yaml
### _Why are the changes needed?_
- readthedocs.yml shall be readthedocs.yaml.
- Add the OS version to the build configuration.
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4800 from yaooqinn/readthedocs.
Closes #4800
ba70cc529 [Kent Yao] Update readthedocs.yaml
Authored-by: Kent Yao
Signed-off-by: Kent Yao
---
.readthedocs.yml => .readthedocs.yaml | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
rename .readthedocs.yml => .readthedocs.yaml (76%)
diff --git a/.readthedocs.yml b/.readthedocs.yaml
similarity index 76%
rename from .readthedocs.yml
rename to .readthedocs.yaml
index 671f2926628..115d9c33885 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yaml
@@ -16,23 +16,19 @@
#
version: 2
+build:
+ os: ubuntu-22.04
+ tools:
+ python: "3.11"
-# Build documentation in the docs/ directory with Sphinx
sphinx:
builder: html
configuration: docs/conf.py
-# Build documentation with MkDocs
-#mkdocs:
-# configuration: mkdocs.yml
-
-# Optionally build your docs in additional formats such as PDF
formats:
- pdf
- epub
-# Optionally set the version of Python and requirements required to build your docs
python:
- version: 3.7
install:
- requirements: docs/requirements.txt
From 6ae0c8b1417995ec3d0036dc79214db83368b0db Mon Sep 17 00:00:00 2001
From: fwang12
Date: Mon, 8 May 2023 15:05:47 +0800
Subject: [PATCH 097/404] [KYUUBI #4801] Using different engine submit timeout
config for kubernetes and yarn
### _Why are the changes needed?_
We shall use different engine submit timeouts for different resource managers.
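For example, the two resource managers can now be tuned independently in `kyuubi-defaults.conf`; both new keys fall back to `kyuubi.engine.submit.timeout` when unset (the values below are illustrative only):

```properties
# YARN RM is usually quick to accept an application
kyuubi.engine.yarn.submit.timeout=PT30S
# image pulls can make Kubernetes pods slower to appear
kyuubi.engine.kubernetes.submit.timeout=PT2M
```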
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4801 from turboFei/engine_submit_timeout.
Closes #4801
e34852a64 [fwang12] nit
ad69008e7 [fwang12] 1.7.2
db11330c5 [fwang12] save
Authored-by: fwang12
Signed-off-by: fwang12
---
docs/deployment/settings.md | 2 ++
.../scala/org/apache/kyuubi/config/KyuubiConf.scala | 12 ++++++++++++
.../engine/KubernetesApplicationOperation.scala | 2 +-
.../kyuubi/engine/YarnApplicationOperation.scala | 2 +-
4 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/docs/deployment/settings.md b/docs/deployment/settings.md
index b12185c3c29..0c7cdfc89bf 100644
--- a/docs/deployment/settings.md
+++ b/docs/deployment/settings.md
@@ -155,6 +155,7 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co
| kyuubi.engine.jdbc.java.options | <undefined> | The extra Java options for the JDBC query engine | string | 1.6.0 |
| kyuubi.engine.jdbc.memory | 1g | The heap memory for the JDBC query engine | string | 1.6.0 |
| kyuubi.engine.jdbc.type | <undefined> | The short name of JDBC type | string | 1.6.0 |
+| kyuubi.engine.kubernetes.submit.timeout | PT30S | The engine submit timeout for Kubernetes application. | duration | 1.7.2 |
| kyuubi.engine.operation.convert.catalog.database.enabled | true | When set to true, The engine converts the JDBC methods of set/get Catalog and set/get Schema to the implementation of different engines | boolean | 1.6.0 |
| kyuubi.engine.operation.log.dir.root | engine_operation_logs | Root directory for query operation log at engine-side. | string | 1.4.0 |
| kyuubi.engine.pool.name | engine-pool | The name of the engine pool. | string | 1.5.0 |
@@ -182,6 +183,7 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co
| kyuubi.engine.user.isolated.spark.session | true | When set to false, if the engine is running in a group or server share level, all the JDBC/ODBC connections will be isolated against the user. Including the temporary views, function registries, SQL configuration, and the current database. Note that, it does not affect if the share level is connection or user. | boolean | 1.6.0 |
| kyuubi.engine.user.isolated.spark.session.idle.interval | PT1M | The interval to check if the user-isolated Spark session is timeout. | duration | 1.6.0 |
| kyuubi.engine.user.isolated.spark.session.idle.timeout | PT6H | If kyuubi.engine.user.isolated.spark.session is false, we will release the Spark session if its corresponding user is inactive after this configured timeout. | duration | 1.6.0 |
+| kyuubi.engine.yarn.submit.timeout | PT30S | The engine submit timeout for YARN application. | duration | 1.7.2 |
### Event
diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
index 63a9ea648db..a6a59406370 100644
--- a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
+++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
@@ -2577,6 +2577,18 @@ object KyuubiConf {
.timeConf
.createWithDefaultString("PT30S")
+ val ENGINE_KUBERNETES_SUBMIT_TIMEOUT: ConfigEntry[Long] =
+ buildConf("kyuubi.engine.kubernetes.submit.timeout")
+ .doc("The engine submit timeout for Kubernetes application.")
+ .version("1.7.2")
+ .fallbackConf(ENGINE_SUBMIT_TIMEOUT)
+
+ val ENGINE_YARN_SUBMIT_TIMEOUT: ConfigEntry[Long] =
+ buildConf("kyuubi.engine.yarn.submit.timeout")
+ .doc("The engine submit timeout for YARN application.")
+ .version("1.7.2")
+ .fallbackConf(ENGINE_SUBMIT_TIMEOUT)
+
/**
* Holds information about keys that have been deprecated.
*
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala
index c569dc9dc56..a6fe286748a 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala
@@ -49,7 +49,7 @@ class KubernetesApplicationOperation extends ApplicationOperation with Logging {
kubernetesClient = KubernetesUtils.buildKubernetesClient(conf) match {
case Some(client) =>
info(s"Initialized Kubernetes Client connect to: ${client.getMasterUrl}")
- submitTimeout = conf.get(KyuubiConf.ENGINE_SUBMIT_TIMEOUT)
+ submitTimeout = conf.get(KyuubiConf.ENGINE_KUBERNETES_SUBMIT_TIMEOUT)
// Disable resync, see https://github.com/fabric8io/kubernetes-client/discussions/5015
enginePodInformer = client.pods()
.withLabel(LABEL_KYUUBI_UNIQUE_KEY)
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala
index ea2bf6dcd37..1f06484fc6a 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/YarnApplicationOperation.scala
@@ -37,7 +37,7 @@ class YarnApplicationOperation extends ApplicationOperation with Logging {
private var submitTimeout: Long = _
override def initialize(conf: KyuubiConf): Unit = {
- submitTimeout = conf.get(KyuubiConf.ENGINE_SUBMIT_TIMEOUT)
+ submitTimeout = conf.get(KyuubiConf.ENGINE_YARN_SUBMIT_TIMEOUT)
val yarnConf = KyuubiHadoopUtils.newYarnConfiguration(conf)
// YarnClient is thread-safe
val c = YarnClient.createYarnClient()
From d73ec64b371cfa3e94d44e39a00c275acdda6abc Mon Sep 17 00:00:00 2001
From: liangbowen
Date: Mon, 8 May 2023 22:45:52 +0800
Subject: [PATCH 098/404] [KYUUBI #4733] Introduce Kafka event logger for
server events
### _Why are the changes needed?_
- introduce the new event logger type `KAFKA` (see the sample configuration below)
- send server events to the configured Kafka topic, initializing and closing the Kafka producer properly along with the server's lifecycle
- use Kafka 3.4.0 as the client version, tested with Kafka servers of 2.8.x and 3.4.x
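A sample `kyuubi-defaults.conf` snippet wiring up the new logger (topic name and bootstrap servers are illustrative):

```properties
kyuubi.backend.server.event.loggers=KAFKA
kyuubi.backend.server.event.kafka.topic=kyuubi-server-events
kyuubi.backend.server.event.kafka.close.timeout=PT5S
# Any other key under the kyuubi.backend.server.event.kafka. prefix is passed
# through to the Kafka producer, e.g. the bootstrap servers:
kyuubi.backend.server.event.kafka.bootstrap.servers=127.0.0.1:9092
```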
### _How was this patch tested?_
- [x] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4733 from bowenliang123/kafka-logger.
Closes #4733
b5220d234 [liangbowen] introduce kafka server event logger
Authored-by: liangbowen
Signed-off-by: liangbowen
---
LICENSE-binary | 4 +
NOTICE-binary | 25 +++-
dev/dependencyList | 4 +
docs/deployment/settings.md | 26 ++--
.../org/apache/kyuubi/config/KyuubiConf.scala | 26 +++-
kyuubi-events/pom.xml | 5 +
.../kyuubi/events/EventHandlerRegister.scala | 7 ++
.../kyuubi/events/EventLoggerType.scala | 3 +-
.../handler/KafkaLoggingEventHandler.scala | 70 +++++++++++
kyuubi-server/pom.xml | 17 +++
.../events/ServerEventHandlerRegister.scala | 21 +++-
.../ServerKafkaLoggingEventHandler.scala | 31 +++++
.../ServerKafkaLoggingEventHandlerSuite.scala | 113 ++++++++++++++++++
pom.xml | 14 +++
14 files changed, 348 insertions(+), 18 deletions(-)
create mode 100644 kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/KafkaLoggingEventHandler.scala
create mode 100644 kyuubi-server/src/main/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandler.scala
create mode 100644 kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandlerSuite.scala
diff --git a/LICENSE-binary b/LICENSE-binary
index a52ea95fbf0..5422596580e 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -321,6 +321,9 @@ io.vertx:vertx-grpc
org.apache.zookeeper:zookeeper
com.squareup.retrofit2:retrofit
com.squareup.okhttp3:okhttp
+org.apache.kafka:kafka-clients
+org.lz4:lz4-java
+org.xerial.snappy:snappy-java
BSD
------------
@@ -332,6 +335,7 @@ com.thoughtworks.paranamer:paranamer
dk.brics.automaton:automaton
com.google.protobuf:protobuf-java-util
com.google.protobuf:protobuf-java
+com.github.luben:zstd-jni
Eclipse Distribution License - v 1.0
------------------------------------
diff --git a/NOTICE-binary b/NOTICE-binary
index ef58e21f6a2..16281d0d861 100644
--- a/NOTICE-binary
+++ b/NOTICE-binary
@@ -1236,7 +1236,7 @@ This product optionally depends on 'zstd-jni', a zstd-jni Java compression
and decompression library, which can be obtained at:
* LICENSE:
- * license/LICENSE.zstd-jni.txt (Apache License 2.0)
+ * license/LICENSE.zstd-jni.txt (BSD License)
* HOMEPAGE:
* https://github.com/luben/zstd-jni
@@ -1370,3 +1370,26 @@ decompression for Java., which can be obtained at:
* HOMEPAGE:
* https://github.com/hyperxpro/Brotli4j
+This product depends on 'kafka-clients', Java clients for Kafka,
+which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.kafka.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/apache/kafka
+
+This product optionally depends on 'snappy-java', Snappy compression and
+decompression for Java, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.snappy-java.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/xerial/snappy-java
+
+This product optionally depends on 'lz4-java', Lz4 compression and
+decompression for Java, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.lz4-java.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/lz4/lz4-java
diff --git a/dev/dependencyList b/dev/dependencyList
index 0abcc51969b..11f2e2c3c2d 100644
--- a/dev/dependencyList
+++ b/dev/dependencyList
@@ -107,6 +107,7 @@ jetty-util-ajax/9.4.50.v20221201//jetty-util-ajax-9.4.50.v20221201.jar
jetty-util/9.4.50.v20221201//jetty-util-9.4.50.v20221201.jar
jline/0.9.94//jline-0.9.94.jar
jul-to-slf4j/1.7.36//jul-to-slf4j-1.7.36.jar
+kafka-clients/3.4.0//kafka-clients-3.4.0.jar
kubernetes-client-api/6.4.1//kubernetes-client-api-6.4.1.jar
kubernetes-client/6.4.1//kubernetes-client-6.4.1.jar
kubernetes-httpclient-okhttp/6.4.1//kubernetes-httpclient-okhttp-6.4.1.jar
@@ -138,6 +139,7 @@ log4j-api/2.20.0//log4j-api-2.20.0.jar
log4j-core/2.20.0//log4j-core-2.20.0.jar
log4j-slf4j-impl/2.20.0//log4j-slf4j-impl-2.20.0.jar
logging-interceptor/3.12.12//logging-interceptor-3.12.12.jar
+lz4-java/1.8.0//lz4-java-1.8.0.jar
metrics-core/4.2.8//metrics-core-4.2.8.jar
metrics-jmx/4.2.8//metrics-jmx-4.2.8.jar
metrics-json/4.2.8//metrics-json-4.2.8.jar
@@ -181,6 +183,7 @@ simpleclient_tracer_otel/0.16.0//simpleclient_tracer_otel-0.16.0.jar
simpleclient_tracer_otel_agent/0.16.0//simpleclient_tracer_otel_agent-0.16.0.jar
slf4j-api/1.7.36//slf4j-api-1.7.36.jar
snakeyaml/1.33//snakeyaml-1.33.jar
+snappy-java/1.1.8.4//snappy-java-1.1.8.4.jar
swagger-annotations/2.2.1//swagger-annotations-2.2.1.jar
swagger-core/2.2.1//swagger-core-2.2.1.jar
swagger-integration/2.2.1//swagger-integration-2.2.1.jar
@@ -193,3 +196,4 @@ vertx-core/4.3.2//vertx-core-4.3.2.jar
vertx-grpc/4.3.2//vertx-grpc-4.3.2.jar
zjsonpatch/0.3.0//zjsonpatch-0.3.0.jar
zookeeper/3.4.14//zookeeper-3.4.14.jar
+zstd-jni/1.5.2-1//zstd-jni-1.5.2-1.jar
diff --git a/docs/deployment/settings.md b/docs/deployment/settings.md
index 0c7cdfc89bf..a358b42701e 100644
--- a/docs/deployment/settings.md
+++ b/docs/deployment/settings.md
@@ -58,18 +58,20 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co
### Backend
-| Key | Default | Meaning | Type | Since |
-|--------------------------------------------------|---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------|
-| kyuubi.backend.engine.exec.pool.keepalive.time | PT1M | Time(ms) that an idle async thread of the operation execution thread pool will wait for a new task to arrive before terminating in SQL engine applications | duration | 1.0.0 |
-| kyuubi.backend.engine.exec.pool.shutdown.timeout | PT10S | Timeout(ms) for the operation execution thread pool to terminate in SQL engine applications | duration | 1.0.0 |
-| kyuubi.backend.engine.exec.pool.size | 100 | Number of threads in the operation execution thread pool of SQL engine applications | int | 1.0.0 |
-| kyuubi.backend.engine.exec.pool.wait.queue.size | 100 | Size of the wait queue for the operation execution thread pool in SQL engine applications | int | 1.0.0 |
-| kyuubi.backend.server.event.json.log.path | file:///tmp/kyuubi/events | The location of server events go for the built-in JSON logger | string | 1.4.0 |
-| kyuubi.backend.server.event.loggers || A comma-separated list of server history loggers, where session/operation etc events go.<ul> <li>JSON: the events will be written to the location of kyuubi.backend.server.event.json.log.path</li> <li>JDBC: to be done</li> <li>CUSTOM: User-defined event handlers.</li></ul> Note that: Kyuubi supports custom event handlers with the Java SPI. To register a custom event handler, the user needs to implement a class which is a child of org.apache.kyuubi.events.handler.CustomEventHandlerProvider which has a zero-arg constructor. | seq | 1.4.0 |
-| kyuubi.backend.server.exec.pool.keepalive.time | PT1M | Time(ms) that an idle async thread of the operation execution thread pool will wait for a new task to arrive before terminating in Kyuubi server | duration | 1.0.0 |
-| kyuubi.backend.server.exec.pool.shutdown.timeout | PT10S | Timeout(ms) for the operation execution thread pool to terminate in Kyuubi server | duration | 1.0.0 |
-| kyuubi.backend.server.exec.pool.size | 100 | Number of threads in the operation execution thread pool of Kyuubi server | int | 1.0.0 |
-| kyuubi.backend.server.exec.pool.wait.queue.size | 100 | Size of the wait queue for the operation execution thread pool of Kyuubi server | int | 1.0.0 |
+| Key | Default | Meaning | Type | Since |
+|--------------------------------------------------|---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|-------|
+| kyuubi.backend.engine.exec.pool.keepalive.time | PT1M | Time(ms) that an idle async thread of the operation execution thread pool will wait for a new task to arrive before terminating in SQL engine applications | duration | 1.0.0 |
+| kyuubi.backend.engine.exec.pool.shutdown.timeout | PT10S | Timeout(ms) for the operation execution thread pool to terminate in SQL engine applications | duration | 1.0.0 |
+| kyuubi.backend.engine.exec.pool.size | 100 | Number of threads in the operation execution thread pool of SQL engine applications | int | 1.0.0 |
+| kyuubi.backend.engine.exec.pool.wait.queue.size | 100 | Size of the wait queue for the operation execution thread pool in SQL engine applications | int | 1.0.0 |
+| kyuubi.backend.server.event.json.log.path | file:///tmp/kyuubi/events | The location of server events go for the built-in JSON logger | string | 1.4.0 |
+| kyuubi.backend.server.event.kafka.close.timeout | PT5S | Period to wait for Kafka producer of server event handlers to close. | duration | 1.8.0 |
+| kyuubi.backend.server.event.kafka.topic | <undefined> | The topic of server events go for the built-in Kafka logger | string | 1.8.0 |
+| kyuubi.backend.server.event.loggers || A comma-separated list of server history loggers, where session/operation etc events go.<ul> <li>JSON: the events will be written to the location of kyuubi.backend.server.event.json.log.path</li> <li>KAFKA: the events will be serialized in JSON format and sent to topic of `kyuubi.backend.server.event.kafka.topic`. Note: For the configs of Kafka producer, please specify them with the prefix: `kyuubi.backend.server.event.kafka.`. For example, `kyuubi.backend.server.event.kafka.bootstrap.servers=127.0.0.1:9092`</li> <li>JDBC: to be done</li> <li>CUSTOM: User-defined event handlers.</li></ul> Note that: Kyuubi supports custom event handlers with the Java SPI. To register a custom event handler, the user needs to implement a class which is a child of org.apache.kyuubi.events.handler.CustomEventHandlerProvider which has a zero-arg constructor. | seq | 1.4.0 |
+| kyuubi.backend.server.exec.pool.keepalive.time | PT1M | Time(ms) that an idle async thread of the operation execution thread pool will wait for a new task to arrive before terminating in Kyuubi server | duration | 1.0.0 |
+| kyuubi.backend.server.exec.pool.shutdown.timeout | PT10S | Timeout(ms) for the operation execution thread pool to terminate in Kyuubi server | duration | 1.0.0 |
+| kyuubi.backend.server.exec.pool.size | 100 | Number of threads in the operation execution thread pool of Kyuubi server | int | 1.0.0 |
+| kyuubi.backend.server.exec.pool.wait.queue.size | 100 | Size of the wait queue for the operation execution thread pool of Kyuubi server | int | 1.0.0 |
### Batch
diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
index a6a59406370..8da336102ce 100644
--- a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
+++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
@@ -2008,12 +2008,34 @@ object KyuubiConf {
.stringConf
.createWithDefault("file:///tmp/kyuubi/events")
+ val SERVER_EVENT_KAFKA_TOPIC: OptionalConfigEntry[String] =
+ buildConf("kyuubi.backend.server.event.kafka.topic")
+ .doc("The topic of server events go for the built-in Kafka logger")
+ .version("1.8.0")
+ .serverOnly
+ .stringConf
+ .createOptional
+
+ val SERVER_EVENT_KAFKA_CLOSE_TIMEOUT: ConfigEntry[Long] =
+ buildConf("kyuubi.backend.server.event.kafka.close.timeout")
+ .doc("Period to wait for Kafka producer of server event handlers to close.")
+ .version("1.8.0")
+ .serverOnly
+ .timeConf
+ .createWithDefault(Duration.ofMillis(5000).toMillis)
+
val SERVER_EVENT_LOGGERS: ConfigEntry[Seq[String]] =
buildConf("kyuubi.backend.server.event.loggers")
.doc("A comma-separated list of server history loggers, where session/operation etc" +
" events go.
" +
s"
JSON: the events will be written to the location of" +
s" ${SERVER_EVENT_JSON_LOG_PATH.key}
" +
+ s"
KAFKA: the events will be serialized in JSON format" +
+ s" and sent to topic of `${SERVER_EVENT_KAFKA_TOPIC.key}`." +
+ s" Note: For the configs of Kafka producer," +
+ s" please specify them with the prefix: `kyuubi.backend.server.event.kafka.`." +
+ s" For example, `kyuubi.backend.server.event.kafka.bootstrap.servers=127.0.0.1:9092`" +
+ s"
" +
s"
JDBC: to be done
" +
s"
CUSTOM: User-defined event handlers.
" +
" Note that: Kyuubi supports custom event handlers with the Java SPI." +
@@ -2026,7 +2048,9 @@ object KyuubiConf {
.stringConf
.transform(_.toUpperCase(Locale.ROOT))
.toSequence()
- .checkValue(_.toSet.subsetOf(Set("JSON", "JDBC", "CUSTOM")), "Unsupported event loggers")
+ .checkValue(
+ _.toSet.subsetOf(Set("JSON", "JDBC", "CUSTOM", "KAFKA")),
+ "Unsupported event loggers")
.createWithDefault(Nil)
@deprecated("using kyuubi.engine.spark.event.loggers instead", "1.6.0")
diff --git a/kyuubi-events/pom.xml b/kyuubi-events/pom.xml
index b97e9dffbb5..6b51fe015a1 100644
--- a/kyuubi-events/pom.xml
+++ b/kyuubi-events/pom.xml
@@ -37,6 +37,11 @@
      <version>${project.version}</version>
    </dependency>
+
+    <dependency>
+      <groupId>org.apache.kafka</groupId>
+      <artifactId>kafka-clients</artifactId>
+    </dependency>

    <dependency>
      <groupId>org.apache.kyuubi</groupId>
      <artifactId>kyuubi-common_${scala.binary.version}</artifactId>
diff --git a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventHandlerRegister.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventHandlerRegister.scala
index 6c7e0893ff0..f75e4be4f51 100644
--- a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventHandlerRegister.scala
+++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventHandlerRegister.scala
@@ -51,6 +51,10 @@ trait EventHandlerRegister extends Logging {
throw new KyuubiException(s"Unsupported jdbc event logger.")
}
+ protected def createKafkaEventHandler(kyuubiConf: KyuubiConf): EventHandler[KyuubiEvent] = {
+ throw new KyuubiException(s"Unsupported kafka event logger.")
+ }
+
private def loadEventHandler(
eventLoggerType: EventLoggerType,
kyuubiConf: KyuubiConf): Seq[EventHandler[KyuubiEvent]] = {
@@ -64,6 +68,9 @@ trait EventHandlerRegister extends Logging {
case EventLoggerType.JDBC =>
createJdbcEventHandler(kyuubiConf) :: Nil
+ case EventLoggerType.KAFKA =>
+ createKafkaEventHandler(kyuubiConf) :: Nil
+
case EventLoggerType.CUSTOM =>
EventHandlerLoader.loadCustom(kyuubiConf)
diff --git a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventLoggerType.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventLoggerType.scala
index a029a0fc5db..987982371e7 100644
--- a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventLoggerType.scala
+++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/EventLoggerType.scala
@@ -21,6 +21,5 @@ object EventLoggerType extends Enumeration {
type EventLoggerType = Value
- // TODO: Only SPARK is done now
- val SPARK, JSON, JDBC, CUSTOM = Value
+ val SPARK, JSON, JDBC, CUSTOM, KAFKA = Value
}
diff --git a/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/KafkaLoggingEventHandler.scala b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/KafkaLoggingEventHandler.scala
new file mode 100644
index 00000000000..a245daef0cd
--- /dev/null
+++ b/kyuubi-events/src/main/scala/org/apache/kyuubi/events/handler/KafkaLoggingEventHandler.scala
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.events.handler
+
+import java.time.Duration
+import java.util.Properties
+
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
+
+import org.apache.kyuubi.Logging
+import org.apache.kyuubi.config.KyuubiConf
+import org.apache.kyuubi.events.KyuubiEvent
+import org.apache.kyuubi.events.handler.KafkaLoggingEventHandler._
+
+/**
+ * This event logger logs events to Kafka.
+ */
+class KafkaLoggingEventHandler(
+ topic: String,
+ producerConf: Map[String, String],
+ kyuubiConf: KyuubiConf,
+ closeTimeoutInMs: Long) extends EventHandler[KyuubiEvent] with Logging {
+ private def defaultProducerConf: Properties = {
+ val conf = new Properties()
+ conf.setProperty("key.serializer", DEFAULT_SERIALIZER_CLASS)
+ conf.setProperty("value.serializer", DEFAULT_SERIALIZER_CLASS)
+ conf
+ }
+
+ private val normalizedProducerConf: Properties = {
+ val conf = defaultProducerConf
+ producerConf.foreach(p => conf.setProperty(p._1, p._2))
+ conf
+ }
+
+ private val kafkaProducer = new KafkaProducer[String, String](normalizedProducerConf)
+
+ override def apply(event: KyuubiEvent): Unit = {
+ try {
+ val record = new ProducerRecord[String, String](topic, event.eventType, event.toJson)
+ kafkaProducer.send(record)
+ } catch {
+ case e: Exception =>
+ error("Failed to send event in KafkaEventHandler", e)
+ }
+ }
+
+ override def close(): Unit = {
+ kafkaProducer.close(Duration.ofMillis(closeTimeoutInMs))
+ }
+}
+
+object KafkaLoggingEventHandler {
+ private val DEFAULT_SERIALIZER_CLASS = "org.apache.kafka.common.serialization.StringSerializer"
+}
diff --git a/kyuubi-server/pom.xml b/kyuubi-server/pom.xml
index c4585b131f2..54d4507d50b 100644
--- a/kyuubi-server/pom.xml
+++ b/kyuubi-server/pom.xml
@@ -395,6 +395,23 @@
      <artifactId>swagger-ui</artifactId>
    </dependency>
+    <dependency>
+      <groupId>org.apache.kafka</groupId>
+      <artifactId>kafka-clients</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.dimafeng</groupId>
+      <artifactId>testcontainers-scala-scalatest_${scala.binary.version}</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>com.dimafeng</groupId>
+      <artifactId>testcontainers-scala-kafka_${scala.binary.version}</artifactId>
+      <scope>test</scope>
+    </dependency>
+
    <dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-exec</artifactId>
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/events/ServerEventHandlerRegister.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/events/ServerEventHandlerRegister.scala
index 4ddee48ddfd..ca6c776ac8c 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/events/ServerEventHandlerRegister.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/events/ServerEventHandlerRegister.scala
@@ -19,8 +19,9 @@ package org.apache.kyuubi.events
import java.net.InetAddress
import org.apache.kyuubi.config.KyuubiConf
-import org.apache.kyuubi.config.KyuubiConf.{SERVER_EVENT_JSON_LOG_PATH, SERVER_EVENT_LOGGERS}
-import org.apache.kyuubi.events.handler.{EventHandler, ServerJsonLoggingEventHandler}
+import org.apache.kyuubi.config.KyuubiConf._
+import org.apache.kyuubi.events.handler.{EventHandler, ServerJsonLoggingEventHandler, ServerKafkaLoggingEventHandler}
+import org.apache.kyuubi.events.handler.ServerKafkaLoggingEventHandler.KAFKA_SERVER_EVENT_HANDLER_PREFIX
import org.apache.kyuubi.util.KyuubiHadoopUtils
object ServerEventHandlerRegister extends EventHandlerRegister {
@@ -36,6 +37,22 @@ object ServerEventHandlerRegister extends EventHandlerRegister {
kyuubiConf)
}
+ override def createKafkaEventHandler(kyuubiConf: KyuubiConf): EventHandler[KyuubiEvent] = {
+ val topic = kyuubiConf.get(SERVER_EVENT_KAFKA_TOPIC).getOrElse {
+ throw new IllegalArgumentException(s"${SERVER_EVENT_KAFKA_TOPIC.key} must be configured")
+ }
+ val closeTimeoutInMs = kyuubiConf.get(SERVER_EVENT_KAFKA_CLOSE_TIMEOUT)
+ val kafkaEventHandlerProducerConf =
+ kyuubiConf.getAllWithPrefix(KAFKA_SERVER_EVENT_HANDLER_PREFIX, "")
+ .filterKeys(
+ !List(SERVER_EVENT_KAFKA_TOPIC, SERVER_EVENT_KAFKA_CLOSE_TIMEOUT).map(_.key).contains(_))
+ ServerKafkaLoggingEventHandler(
+ topic,
+ kafkaEventHandlerProducerConf,
+ kyuubiConf,
+ closeTimeoutInMs)
+ }
+
override protected def getLoggers(conf: KyuubiConf): Seq[String] = {
conf.get(SERVER_EVENT_LOGGERS)
}
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandler.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandler.scala
new file mode 100644
index 00000000000..a7421a0570f
--- /dev/null
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandler.scala
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.events.handler
+
+import org.apache.kyuubi.config.KyuubiConf
+
+case class ServerKafkaLoggingEventHandler(
+ topic: String,
+ producerConf: Map[String, String],
+ kyuubiConf: KyuubiConf,
+ closeTimeoutInMs: Long)
+ extends KafkaLoggingEventHandler(topic, producerConf, kyuubiConf, closeTimeoutInMs)
+
+object ServerKafkaLoggingEventHandler {
+ val KAFKA_SERVER_EVENT_HANDLER_PREFIX = "kyuubi.backend.server.event.kafka"
+}
diff --git a/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandlerSuite.scala b/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandlerSuite.scala
new file mode 100644
index 00000000000..461414f3f91
--- /dev/null
+++ b/kyuubi-server/src/test/scala/org/apache/kyuubi/events/handler/ServerKafkaLoggingEventHandlerSuite.scala
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.events.handler
+
+import java.time.Duration
+import java.util.Properties
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration._
+import scala.util.Random
+
+import com.dimafeng.testcontainers.KafkaContainer
+import com.dimafeng.testcontainers.scalatest.TestContainerForAll
+import com.fasterxml.jackson.databind.json.JsonMapper
+import org.apache.kafka.clients.admin.{AdminClient, NewTopic}
+import org.apache.kafka.clients.consumer.KafkaConsumer
+
+import org.apache.kyuubi._
+import org.apache.kyuubi.config.KyuubiConf
+import org.apache.kyuubi.events.handler.ServerKafkaLoggingEventHandler.KAFKA_SERVER_EVENT_HANDLER_PREFIX
+import org.apache.kyuubi.operation.HiveJDBCTestHelper
+
+abstract class ServerKafkaLoggingEventHandlerSuite extends WithKyuubiServer with HiveJDBCTestHelper
+ with BatchTestHelper with TestContainerForAll {
+
+ /**
+ * `confluentinc/cp-kafka` is Confluent Community Docker Image for Apache Kafka.
+ * The list of compatibility for Kafka's version refers to:
+ * https://docs.confluent.io/platform/current/installation
+ * /versions-interoperability.html#cp-and-apache-ak-compatibility
+ */
+ protected val imageTag: String
+ override lazy val containerDef: KafkaContainer.Def =
+ KafkaContainer.Def(s"confluentinc/cp-kafka:$imageTag")
+ private val destTopic = "server-event-topic"
+ private val mapper = JsonMapper.builder().build()
+ override protected def jdbcUrl: String = getJdbcUrl
+
+ override protected val conf: KyuubiConf = {
+ KyuubiConf()
+ .set(KyuubiConf.SERVER_EVENT_LOGGERS, Seq("KAFKA"))
+ .set(KyuubiConf.SERVER_EVENT_KAFKA_TOPIC, destTopic)
+ }
+
+ override def beforeAll(): Unit = withContainers { kafkaContainer =>
+ val bootstrapServers = kafkaContainer.bootstrapServers
+ createTopic(kafkaContainer.bootstrapServers, destTopic)
+ conf.set(s"$KAFKA_SERVER_EVENT_HANDLER_PREFIX.bootstrap.servers", bootstrapServers)
+
+ super.beforeAll()
+ }
+
+ private def createTopic(kafkaServerUrl: String, topic: String): Unit = {
+ val adminProps = new Properties
+ adminProps.setProperty("bootstrap.servers", kafkaServerUrl)
+ val adminClient = AdminClient.create(adminProps)
+ adminClient.createTopics(List(new NewTopic(topic, 1, 1.toShort)).asJava)
+ adminClient.close()
+ }
+
+ test("check server events sent to kafka topic") {
+ withContainers { kafkaContainer =>
+ val consumerConf = new Properties
+ Map(
+ "bootstrap.servers" -> kafkaContainer.bootstrapServers,
+ "group.id" -> s"server-kafka-logger-test-${Random.nextInt}",
+ "auto.offset.reset" -> "earliest",
+ "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
+ "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer")
+ .foreach(p => consumerConf.setProperty(p._1, p._2))
+ val consumer = new KafkaConsumer[String, String](consumerConf)
+ try {
+ consumer.subscribe(List(destTopic).asJava)
+ eventually(timeout(10.seconds), interval(500.milliseconds)) {
+ val records = consumer.poll(Duration.ofMillis(500))
+ assert(records.count() > 0)
+ records.forEach { record =>
+ val jsonObj = mapper.readTree(record.value())
+ assertResult("kyuubi_server_info")(record.key)
+ assertResult(server.getName)(jsonObj.get("serverName").asText())
+ }
+ }
+ } finally {
+ consumer.close()
+ }
+ }
+ }
+}
+
+class ServerKafkaLoggingEventHandlerSuiteForKafka2 extends ServerKafkaLoggingEventHandlerSuite {
+ // equivalent to Apache Kafka 2.8.x
+ override val imageTag = "6.2.10"
+}
+
+class ServerKafkaLoggingEventHandlerSuiteForKafka3 extends ServerKafkaLoggingEventHandlerSuite {
+ // equivalent to Apache Kafka 3.3.x
+ override val imageTag = "7.3.3"
+}
diff --git a/pom.xml b/pom.xml
index af56eb1672f..2fe54e32938 100644
--- a/pom.xml
+++ b/pom.xml
@@ -165,6 +165,7 @@
    <jetty.version>9.4.50.v20221201</jetty.version>
    <jline.version>0.9.94</jline.version>
    <junit.version>4.13.2</junit.version>
+    <kafka.version>3.4.0</kafka.version>
    <kubernetes-client.version>6.4.1</kubernetes-client.version>
@@ -545,6 +546,12 @@
        <version>${testcontainers-scala.version}</version>
      </dependency>
+
+      <dependency>
+        <groupId>com.dimafeng</groupId>
+        <artifactId>testcontainers-scala-kafka_${scala.binary.version}</artifactId>
+        <version>${testcontainers-scala.version}</version>
+      </dependency>

      <dependency>
        <groupId>io.fabric8</groupId>
        <artifactId>kubernetes-client</artifactId>
@@ -1261,6 +1268,13 @@
+
+      <dependency>
+        <groupId>org.apache.kafka</groupId>
+        <artifactId>kafka-clients</artifactId>
+        <version>${kafka.version}</version>
+        <optional>true</optional>
+      </dependency>

      <dependency>
        <groupId>com.github.scopt</groupId>
        <artifactId>scopt_${scala.binary.version}</artifactId>
From 4e0562df8f03e219768548773dcd6ec284427beb Mon Sep 17 00:00:00 2001
From: liangbowen
Date: Tue, 9 May 2023 08:44:00 +0800
Subject: [PATCH 099/404] [KYUUBI #4808] Bump Jetty from 9.4.50 to 9.4.51
### _Why are the changes needed?_
- to fix 2 Dependabot alerts
1. https://github.com/apache/kyuubi/security/dependabot/19
2. https://github.com/apache/kyuubi/security/dependabot/18
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4808 from bowenliang123/jetty-9.4.51.
Closes #4808
84789292b [liangbowen] update dependencyList
d1ebdc68a [liangbowen] Revert "update dependencyList"
e4e26b7b5 [liangbowen] update dependencyList
6cfb37dbb [liangbowen] bump jetty to 9.4.51
Authored-by: liangbowen
Signed-off-by: bowenliang
---
dev/dependencyList | 14 +++++++-------
pom.xml | 2 +-
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/dev/dependencyList b/dev/dependencyList
index 11f2e2c3c2d..b216b7092e1 100644
--- a/dev/dependencyList
+++ b/dev/dependencyList
@@ -98,13 +98,13 @@ jetcd-api/0.7.3//jetcd-api-0.7.3.jar
jetcd-common/0.7.3//jetcd-common-0.7.3.jar
jetcd-core/0.7.3//jetcd-core-0.7.3.jar
jetcd-grpc/0.7.3//jetcd-grpc-0.7.3.jar
-jetty-http/9.4.50.v20221201//jetty-http-9.4.50.v20221201.jar
-jetty-io/9.4.50.v20221201//jetty-io-9.4.50.v20221201.jar
-jetty-security/9.4.50.v20221201//jetty-security-9.4.50.v20221201.jar
-jetty-server/9.4.50.v20221201//jetty-server-9.4.50.v20221201.jar
-jetty-servlet/9.4.50.v20221201//jetty-servlet-9.4.50.v20221201.jar
-jetty-util-ajax/9.4.50.v20221201//jetty-util-ajax-9.4.50.v20221201.jar
-jetty-util/9.4.50.v20221201//jetty-util-9.4.50.v20221201.jar
+jetty-http/9.4.51.v20230217//jetty-http-9.4.51.v20230217.jar
+jetty-io/9.4.51.v20230217//jetty-io-9.4.51.v20230217.jar
+jetty-security/9.4.51.v20230217//jetty-security-9.4.51.v20230217.jar
+jetty-server/9.4.51.v20230217//jetty-server-9.4.51.v20230217.jar
+jetty-servlet/9.4.51.v20230217//jetty-servlet-9.4.51.v20230217.jar
+jetty-util-ajax/9.4.51.v20230217//jetty-util-ajax-9.4.51.v20230217.jar
+jetty-util/9.4.51.v20230217//jetty-util-9.4.51.v20230217.jar
jline/0.9.94//jline-0.9.94.jar
jul-to-slf4j/1.7.36//jul-to-slf4j-1.7.36.jar
kafka-clients/3.4.0//kafka-clients-3.4.0.jar
diff --git a/pom.xml b/pom.xml
index 2fe54e32938..03bf2a14975 100644
--- a/pom.xml
+++ b/pom.xml
@@ -162,7 +162,7 @@
    <jersey.version>2.39.1</jersey.version>
-    <jetty.version>9.4.50.v20221201</jetty.version>
+    <jetty.version>9.4.51.v20230217</jetty.version>
    <jline.version>0.9.94</jline.version>
    <junit.version>4.13.2</junit.version>
    <kafka.version>3.4.0</kafka.version>
From 52a11e909beb6d14e098e79323065284c8e34835 Mon Sep 17 00:00:00 2001
From: bowenliang
Date: Tue, 9 May 2023 09:45:38 +0800
Subject: [PATCH 100/404] [KYUUBI #4805] Bump openai-java from 0.11.1 to 0.12.0
### _Why are the changes needed?_
- openai-java v0.12.0 release note: https://github.com/TheoKanning/openai-java/releases/tag/0.12.0
- client version preparation for streaming chat support, with `Streaming support for Text and Chat completion` in v0.12.0
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4805 from bowenliang123/openai0.12.0.
Closes #4805
724d8e7d4 [bowenliang] bump openai-java from 0.11.1 to 0.12.0
Authored-by: bowenliang
Signed-off-by: bowenliang
---
pom.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pom.xml b/pom.xml
index 03bf2a14975..cff8f965627 100644
--- a/pom.xml
+++ b/pom.xml
@@ -173,7 +173,7 @@
8.0.324.6.14.1.89.Final
- 0.11.1
+ 0.12.01.10.16.0.00.16.0
From 7225f338f6bc93cc242afce9a9b018d05cdae258 Mon Sep 17 00:00:00 2001
From: bowenliang
Date: Tue, 9 May 2023 14:21:27 +0800
Subject: [PATCH 101/404] [KYUUBI #4809] [CHAT] Set session user in ChatGPT
request
### _Why are the changes needed?_
- set the session user when opening a ChatProvider instance
- pass the session user in the ChatGPT request to identify the message's user, enabling better monitoring and abuse detection on OpenAI's side (https://platform.openai.com/docs/api-reference/chat/create#chat/create-user); see the sketch below
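For illustration, a minimal sketch of attaching the session user to the request, assuming openai-java 0.12.x; the user name and model string here are hypothetical (Kyuubi reads the model from its conf, per the diff below):
```scala
import java.util.Collections

import com.theokanning.openai.completion.chat.{ChatCompletionRequest, ChatMessage, ChatMessageRole}

val sessionUser: Option[String] = Some("alice") // hypothetical session user
val request = ChatCompletionRequest.builder()
  .model("gpt-3.5-turbo") // assumed model name
  .messages(Collections.singletonList(new ChatMessage(ChatMessageRole.USER.value(), "hello")))
  .user(sessionUser.orNull) // OpenAI uses this field to attribute messages to an end user
  .build()
```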
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4809 from bowenliang123/chat-user.
Closes #4809
615d2385a [bowenliang] set session user in chatgpt request
Authored-by: bowenliang
Signed-off-by: bowenliang
---
.../kyuubi/engine/chat/provider/ChatGPTProvider.scala | 10 +++++++---
.../kyuubi/engine/chat/provider/ChatProvider.scala | 2 +-
.../kyuubi/engine/chat/provider/EchoProvider.scala | 2 +-
.../kyuubi/engine/chat/session/ChatSessionImpl.scala | 2 +-
4 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala
index cdea89d2aad..03bf0b820e8 100644
--- a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala
+++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala
@@ -26,7 +26,7 @@ import scala.collection.JavaConverters._
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.theokanning.openai.OpenAiApi
-import com.theokanning.openai.completion.chat.{ChatCompletionRequest, ChatMessage}
+import com.theokanning.openai.completion.chat.{ChatCompletionRequest, ChatMessage, ChatMessageRole}
import com.theokanning.openai.service.OpenAiService
import com.theokanning.openai.service.OpenAiService.{defaultClient, defaultObjectMapper, defaultRetrofit}
@@ -60,6 +60,8 @@ class ChatGPTProvider(conf: KyuubiConf) extends ChatProvider {
new OpenAiService(api)
}
+ private var sessionUser: Option[String] = None
+
private val chatHistory: LoadingCache[String, util.ArrayDeque[ChatMessage]] =
CacheBuilder.newBuilder()
.expireAfterWrite(10, TimeUnit.MINUTES)
@@ -68,17 +70,19 @@ class ChatGPTProvider(conf: KyuubiConf) extends ChatProvider {
new util.ArrayDeque[ChatMessage]
})
- override def open(sessionId: String): Unit = {
+ override def open(sessionId: String, user: Option[String]): Unit = {
+ sessionUser = user
chatHistory.getIfPresent(sessionId)
}
override def ask(sessionId: String, q: String): String = {
val messages = chatHistory.get(sessionId)
try {
- messages.addLast(new ChatMessage("user", q))
+ messages.addLast(new ChatMessage(ChatMessageRole.USER.value(), q))
val completionRequest = ChatCompletionRequest.builder()
.model(conf.get(KyuubiConf.ENGINE_CHAT_GPT_MODEL))
.messages(messages.asScala.toList.asJava)
+ .user(sessionUser.orNull)
.build()
val responseText = openAiService.createChatCompletion(completionRequest).getChoices.asScala
.map(c => c.getMessage.getContent).mkString
diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatProvider.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatProvider.scala
index af1ba434bea..f9fa8bb5a78 100644
--- a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatProvider.scala
+++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatProvider.scala
@@ -28,7 +28,7 @@ import org.apache.kyuubi.reflection.DynConstructors
trait ChatProvider {
- def open(sessionId: String): Unit
+ def open(sessionId: String, user: Option[String] = None): Unit
def ask(sessionId: String, q: String): String
diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/EchoProvider.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/EchoProvider.scala
index 31ad3b8e390..1116ea785dc 100644
--- a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/EchoProvider.scala
+++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/EchoProvider.scala
@@ -19,7 +19,7 @@ package org.apache.kyuubi.engine.chat.provider
class EchoProvider extends ChatProvider {
- override def open(sessionId: String): Unit = {}
+ override def open(sessionId: String, user: Option[String]): Unit = {}
override def ask(sessionId: String, q: String): String =
"This is ChatKyuubi, nice to meet you!"
diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/session/ChatSessionImpl.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/session/ChatSessionImpl.scala
index 29f42076822..6ec6d062600 100644
--- a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/session/ChatSessionImpl.scala
+++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/session/ChatSessionImpl.scala
@@ -38,7 +38,7 @@ class ChatSessionImpl(
override def open(): Unit = {
info(s"Starting to open chat session.")
- chatProvider.open(handle.identifier.toString)
+ chatProvider.open(handle.identifier.toString, Some(user))
super.open()
info(s"The chat session is started.")
}
From e112e381fff99f1d619185521edb064d88d35873 Mon Sep 17 00:00:00 2001
From: bowenliang
Date: Wed, 10 May 2023 11:53:14 +0800
Subject: [PATCH 102/404] [KYUUBI #4810] [CHAT] Request and use a single choice
for chat completion
### _Why are the changes needed?_
- explicitly set `n` to 1 in the ChatGPT chat completion request (the API default is 1, https://platform.openai.com/docs/api-reference/chat/create#chat/create-n)
- read the single choice from the chat completion response; see the sketch below
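A minimal sketch of the single-choice request and response handling; `messages` and `openAiService` are assumed to exist as in the diff below:
```scala
val completionRequest = ChatCompletionRequest.builder()
  .model("gpt-3.5-turbo") // assumed model name; Kyuubi reads it from conf
  .messages(messages)
  .n(1) // request exactly one choice (the default, made explicit)
  .build()
// with n = 1 the response carries a single choice, so index 0 is safe to read
val responseText = openAiService.createChatCompletion(completionRequest)
  .getChoices.get(0).getMessage.getContent
```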
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4810 from bowenliang123/chat-onechoice.
Closes #4810
f221de4e8 [bowenliang] one message
Authored-by: bowenliang
Signed-off-by: bowenliang
---
.../apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala
index 03bf0b820e8..aae8b488a5c 100644
--- a/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala
+++ b/externals/kyuubi-chat-engine/src/main/scala/org/apache/kyuubi/engine/chat/provider/ChatGPTProvider.scala
@@ -83,9 +83,10 @@ class ChatGPTProvider(conf: KyuubiConf) extends ChatProvider {
.model(conf.get(KyuubiConf.ENGINE_CHAT_GPT_MODEL))
.messages(messages.asScala.toList.asJava)
.user(sessionUser.orNull)
+ .n(1)
.build()
- val responseText = openAiService.createChatCompletion(completionRequest).getChoices.asScala
- .map(c => c.getMessage.getContent).mkString
+ val responseText = openAiService.createChatCompletion(completionRequest)
+ .getChoices.get(0).getMessage.getContent
responseText
} catch {
case e: Throwable =>
From 47353911d219aab3460ad4ced14fc2483579b7cd Mon Sep 17 00:00:00 2001
From: Fu Chen
Date: Wed, 10 May 2023 20:17:04 +0800
Subject: [PATCH 103/404] [KYUUBI #4797] [ARROW] Reflective calls to the
function `ArrowUtils#toArrowSchema`
### _Why are the changes needed?_
to adapt to Spark 3.5
the signature of the function `ArrowUtils#toArrowSchema` was changed in https://github.com/apache/spark/pull/40988 (since Spark 3.5)
Spark 3.4 or previous
```scala
def toArrowSchema(schema: StructType, timeZoneId: String): Schema
```
Spark 3.5 or later
```scala
def toArrowSchema(
schema: StructType,
timeZoneId: String,
errorOnDuplicatedFieldNames: Boolean): Schema
```
Kyuubi is not affected by the duplicated nested field names issue, as it consistently converts struct types to string types in Arrow mode.
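For illustration, a plain `java.lang.reflect` sketch of the same two-signature fallback (the actual change below uses Kyuubi's `DynMethods` helper; the wrapper name here is hypothetical):
```scala
import org.apache.arrow.vector.types.pojo.Schema
import org.apache.spark.sql.types.StructType

def toArrowSchemaCompat(schema: StructType, timeZoneId: String): Schema = {
  // ArrowUtils is a Scala object, so reflect on its companion class and MODULE$ field
  val clazz = Class.forName("org.apache.spark.sql.util.ArrowUtils$")
  val module = clazz.getField("MODULE$").get(null)
  val (method, args) =
    try {
      // Spark 3.4 or previous: (StructType, String)
      (clazz.getMethod("toArrowSchema", classOf[StructType], classOf[String]),
        Array[AnyRef](schema, timeZoneId))
    } catch {
      case _: NoSuchMethodException =>
        // Spark 3.5 or later: (StructType, String, Boolean)
        (clazz.getMethod("toArrowSchema", classOf[StructType], classOf[String], classOf[Boolean]),
          Array[AnyRef](schema, timeZoneId, java.lang.Boolean.TRUE))
    }
  method.invoke(module, args: _*).asInstanceOf[Schema]
}
```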
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4797 from cfmcgrady/arrow-toArrowSchema.
Closes #4797
2eb881b87 [Fu Chen] auto box
f69e0b395 [Fu Chen] asInstanceOf[Object] -> new JBoolean(errorOnDuplicatedFieldNames)
84c0ed381 [Fu Chen] unnecessarily force conversions
5ca65df8e [Fu Chen] Revert "new JBoolean"
0f7a1b4bd [Fu Chen] new JBoolean
044ba421c [Fu Chen] update comment
989c3caf1 [Fu Chen] reflective call ArrowUtils.toArrowSchema
Authored-by: Fu Chen
Signed-off-by: Cheng Pan
---
.../arrow/KyuubiArrowConverters.scala | 36 +++++++++++++++++--
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala
index 8a34943cc44..5930dcdfc8c 100644
--- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/spark/sql/execution/arrow/KyuubiArrowConverters.scala
@@ -18,6 +18,7 @@
package org.apache.spark.sql.execution.arrow
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
+import java.lang.{Boolean => JBoolean}
import java.nio.channels.Channels
import scala.collection.JavaConverters._
@@ -26,6 +27,7 @@ import scala.collection.mutable.ArrayBuffer
import org.apache.arrow.vector._
import org.apache.arrow.vector.ipc.{ArrowStreamWriter, ReadChannel, WriteChannel}
import org.apache.arrow.vector.ipc.message.{IpcOption, MessageSerializer}
+import org.apache.arrow.vector.types.pojo.{Schema => ArrowSchema}
import org.apache.spark.TaskContext
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
@@ -36,6 +38,8 @@ import org.apache.spark.sql.types._
import org.apache.spark.sql.util.ArrowUtils
import org.apache.spark.util.Utils
+import org.apache.kyuubi.reflection.DynMethods
+
object KyuubiArrowConverters extends SQLConfHelper with Logging {
type Batch = (Array[Byte], Long)
@@ -60,7 +64,7 @@ object KyuubiArrowConverters extends SQLConfHelper with Logging {
"slice",
0,
Long.MaxValue)
- val arrowSchema = ArrowUtils.toArrowSchema(schema, timeZoneId)
+ val arrowSchema = toArrowSchema(schema, timeZoneId, true)
vectorSchemaRoot = VectorSchemaRoot.create(arrowSchema, sliceAllocator)
try {
val recordBatch = MessageSerializer.deserializeRecordBatch(
@@ -238,7 +242,7 @@ object KyuubiArrowConverters extends SQLConfHelper with Logging {
context: TaskContext)
extends Iterator[Array[Byte]] {
- protected val arrowSchema = ArrowUtils.toArrowSchema(schema, timeZoneId)
+ protected val arrowSchema = toArrowSchema(schema, timeZoneId, true)
private val allocator =
ArrowUtils.rootAllocator.newChildAllocator(
s"to${this.getClass.getSimpleName}",
@@ -312,6 +316,34 @@ object KyuubiArrowConverters extends SQLConfHelper with Logging {
}
}
+ // the signature of function [[ArrowUtils.toArrowSchema]] is changed in SPARK-41971 (since Spark
+ // 3.5)
+ private lazy val toArrowSchemaMethod = DynMethods.builder("toArrowSchema")
+ .impl( // for Spark 3.4 or previous
+ "org.apache.spark.sql.util.ArrowUtils",
+ classOf[StructType],
+ classOf[String])
+ .impl( // for Spark 3.5 or later
+ "org.apache.spark.sql.util.ArrowUtils",
+ classOf[StructType],
+ classOf[String],
+ classOf[Boolean])
+ .build()
+
+ /**
+ * this function uses reflective calls to the [[ArrowUtils.toArrowSchema]].
+ */
+ private def toArrowSchema(
+ schema: StructType,
+ timeZone: String,
+ errorOnDuplicatedFieldNames: JBoolean): ArrowSchema = {
+ toArrowSchemaMethod.invoke[ArrowSchema](
+ ArrowUtils,
+ schema,
+ timeZone,
+ errorOnDuplicatedFieldNames)
+ }
+
// for testing
def fromBatchIterator(
arrowBatchIter: Iterator[Array[Byte]],
From 299df0d7c2ad9ad6509861fada2d25ee2933e1fd Mon Sep 17 00:00:00 2001
From: fwang12
Date: Wed, 10 May 2023 20:34:08 +0800
Subject: [PATCH 104/404] [KYUUBI #4811] Do not update app info after batch or
application terminated
### _Why are the changes needed?_
If the application has been terminated, there is no need to update the application info; this prevents the correct application info from being overwritten by the NOT_FOUND state.
If the batch has been terminated, we shall get the batch report from the metadata store. A sketch of the guard follows.
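Paraphrasing the change below as a minimal sketch (the `refresh` helper name is hypothetical; `applicationId` and `ApplicationState.isTerminated` are from the diff):
```scala
// only poll the cluster while the app id is unknown or the app is still running;
// once a terminal state is recorded, a late NOT_FOUND can no longer overwrite it
if (applicationId(_applicationInfo).isEmpty ||
  !_applicationInfo.map(_.state).exists(ApplicationState.isTerminated)) {
  refreshApplicationInfoAndMetadata() // hypothetical helper for the body of the diff
}
```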
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4811 from turboFei/k8s_status.
Closes #4811
8fc6fd6ab [fwang12] check app id defined
87b0797e0 [fwang12] if batch state is terminal, get state from metadata store
488433e05 [fwang12] save
Authored-by: fwang12
Signed-off-by: fwang12
---
.../kyuubi/operation/BatchJobSubmission.scala | 13 ++++++++-----
.../kyuubi/server/api/v1/BatchesResource.scala | 2 +-
2 files changed, 9 insertions(+), 6 deletions(-)
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala
index e77416d31af..96022614ee4 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/operation/BatchJobSubmission.scala
@@ -298,11 +298,14 @@ class BatchJobSubmission(
}
private def updateApplicationInfoMetadataIfNeeded(): Unit = {
- val newApplicationStatus = currentApplicationInfo()
- if (newApplicationStatus.map(_.state) != _applicationInfo.map(_.state)) {
- _applicationInfo = newApplicationStatus
- updateBatchMetadata()
- info(s"Batch report for $batchId, ${_applicationInfo}")
+ if (applicationId(_applicationInfo).isEmpty ||
+ !_applicationInfo.map(_.state).exists(ApplicationState.isTerminated)) {
+ val newApplicationStatus = currentApplicationInfo()
+ if (newApplicationStatus.map(_.state) != _applicationInfo.map(_.state)) {
+ _applicationInfo = newApplicationStatus
+ updateBatchMetadata()
+ info(s"Batch report for $batchId, ${_applicationInfo}")
+ }
}
}
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/BatchesResource.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/BatchesResource.scala
index 38ce0e2973d..11c36c75735 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/BatchesResource.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/server/api/v1/BatchesResource.scala
@@ -82,7 +82,7 @@ private[v1] class BatchesResource extends ApiRequestContext with Logging {
var appState: String = null
var appDiagnostic: String = null
- if (batchAppStatus.nonEmpty) {
+ if (!OperationState.isTerminal(batchOpStatus.state) && batchAppStatus.nonEmpty) {
appId = batchAppStatus.get.id
appUrl = batchAppStatus.get.url.orNull
appState = batchAppStatus.get.state.toString
From d73d73eb2e2e155b3a1bd6bfba4c64596f89f0d0 Mon Sep 17 00:00:00 2001
From: dnskr
Date: Thu, 11 May 2023 11:01:17 +0800
Subject: [PATCH 105/404] [KYUUBI #4819] [K8S][HELM] Refactor liveness and
readiness probes
### _Why are the changes needed?_
The changes are needed to follow the common flat structure pattern for liveness and readiness probes (see the values sketch after this list), for example:
- [Airflow](https://github.com/apache/airflow/blob/584a9f5dae5b29a1968fbdbc9b1edd01ae2be4d2/chart/values.yaml#L961-L973)
- [Superset](https://github.com/apache/superset/blob/e3719a1b076228dcfae3cdd82844bdfe48b552ec/helm/superset/values.yaml#L300-L317)
- [Bitnami PostgreSQL](https://github.com/bitnami/charts/blob/8f2277440b976d52785ba9149762ad8620a73d1f/bitnami/postgresql/values.yaml#L390-L410)
- [ArgoCD](https://github.com/argoproj/argo-helm/blob/b37a9e72a68d8fafe3cddd14cf2b3ed6722eff4a/charts/argo-cd/values.yaml#L1631-L1653)
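A sketch of the resulting flat layout in `values.yaml`, matching the chart changes below (only a subset of fields shown):
```yaml
# before: probes nested under a shared "probe" key
# probe:
#   liveness:
#     enabled: true
#   readiness:
#     enabled: true

# after: one top-level key per probe, as in the referenced charts
livenessProbe:
  enabled: true
  initialDelaySeconds: 30
readinessProbe:
  enabled: true
  initialDelaySeconds: 30
```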
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4819 from dnskr/helm_refactor_probes.
Closes #4819
eb692379f [dnskr] [K8S][HELM] Refactor liveness and readiness probes
Authored-by: dnskr
Signed-off-by: Cheng Pan
---
.../kyuubi/templates/kyuubi-deployment.yaml | 24 ++++++-------
charts/kyuubi/values.yaml | 34 ++++++++++---------
2 files changed, 30 insertions(+), 28 deletions(-)
diff --git a/charts/kyuubi/templates/kyuubi-deployment.yaml b/charts/kyuubi/templates/kyuubi-deployment.yaml
index beca0998a08..79d49a653dd 100644
--- a/charts/kyuubi/templates/kyuubi-deployment.yaml
+++ b/charts/kyuubi/templates/kyuubi-deployment.yaml
@@ -65,25 +65,25 @@ spec:
containerPort: {{ $frontend.port }}
{{- end }}
{{- end }}
- {{- if .Values.probe.liveness.enabled }}
+ {{- if .Values.livenessProbe.enabled }}
livenessProbe:
exec:
command: ["/bin/bash", "-c", "bin/kyuubi status"]
- initialDelaySeconds: {{ .Values.probe.liveness.initialDelaySeconds }}
- periodSeconds: {{ .Values.probe.liveness.periodSeconds }}
- timeoutSeconds: {{ .Values.probe.liveness.timeoutSeconds }}
- failureThreshold: {{ .Values.probe.liveness.failureThreshold }}
- successThreshold: {{ .Values.probe.liveness.successThreshold }}
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+ successThreshold: {{ .Values.livenessProbe.successThreshold }}
{{- end }}
- {{- if .Values.probe.readiness.enabled }}
+ {{- if .Values.readinessProbe.enabled }}
readinessProbe:
exec:
command: ["/bin/bash", "-c", "$KYUUBI_HOME/bin/kyuubi status"]
- initialDelaySeconds: {{ .Values.probe.readiness.initialDelaySeconds }}
- periodSeconds: {{ .Values.probe.readiness.periodSeconds }}
- timeoutSeconds: {{ .Values.probe.readiness.timeoutSeconds }}
- failureThreshold: {{ .Values.probe.readiness.failureThreshold }}
- successThreshold: {{ .Values.probe.readiness.successThreshold }}
+ initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+ successThreshold: {{ .Values.readinessProbe.successThreshold }}
{{- end }}
{{- with .Values.resources }}
resources: {{- toYaml . | nindent 12 }}
diff --git a/charts/kyuubi/values.yaml b/charts/kyuubi/values.yaml
index 7eca7211393..d97fc982b1b 100644
--- a/charts/kyuubi/values.yaml
+++ b/charts/kyuubi/values.yaml
@@ -41,22 +41,6 @@ rbac:
resources: ["pods"]
verbs: ["create", "list", "delete"]
-probe:
- liveness:
- enabled: true
- initialDelaySeconds: 30
- periodSeconds: 10
- timeoutSeconds: 2
- failureThreshold: 10
- successThreshold: 1
- readiness:
- enabled: true
- initialDelaySeconds: 30
- periodSeconds: 10
- timeoutSeconds: 2
- failureThreshold: 10
- successThreshold: 1
-
server:
# Thrift Binary protocol (HiveServer2 compatible)
thriftBinary:
@@ -162,6 +146,24 @@ resources: {}
# cpu: 2
# memory: 4Gi
+# Liveness probe
+livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 2
+ failureThreshold: 10
+ successThreshold: 1
+
+# Readiness probe
+readinessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 2
+ failureThreshold: 10
+ successThreshold: 1
+
# Constrain Kyuubi server pods to specific nodes
nodeSelector: {}
tolerations: []
From 9bf2714288104b94180141c1d6fe2e974da2e0d6 Mon Sep 17 00:00:00 2001
From: dnskr
Date: Thu, 11 May 2023 13:06:43 +0800
Subject: [PATCH 106/404] [KYUUBI #4818] [K8S][HELM] Update default Kyuubi
version to 1.7.1
### _Why are the changes needed?_
The change is needed to use the latest stable release of Kyuubi in the Helm chart by default.
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4818 from dnskr/helm_update_kyuubi_version.
Closes #4818
46fad19f4 [dnskr] [K8S][HELM] Update default Kyuubi version to 1.7.1
Authored-by: dnskr
Signed-off-by: Cheng Pan
---
charts/kyuubi/Chart.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/charts/kyuubi/Chart.yaml b/charts/kyuubi/Chart.yaml
index 0abec9e5ef3..fa61794253f 100644
--- a/charts/kyuubi/Chart.yaml
+++ b/charts/kyuubi/Chart.yaml
@@ -20,7 +20,7 @@ name: kyuubi
description: A Helm chart for Kyuubi server
type: application
version: 0.1.0
-appVersion: 1.7.0
+appVersion: 1.7.1
home: https://kyuubi.apache.org
icon: https://raw.githubusercontent.com/apache/kyuubi/master/docs/imgs/logo.png
sources:
From 369c21a61156bf4bf82e8641e2d85eaa5acd8922 Mon Sep 17 00:00:00 2001
From: fwang12
Date: Thu, 11 May 2023 15:32:25 +0800
Subject: [PATCH 107/404] [KYUUBI #4816] [K8S] Correct the implementation of
cleanup terminated appInfo
### _Why are the changes needed?_
For `LocalCache`, the `put` operation will `remove` the existing element,
https://github.com/apache/kyuubi/blob/299df0d7c2ad9ad6509861fada2d25ee2933e1fd/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala#L202-L207
which triggers the removal listener
https://github.com/apache/kyuubi/blob/299df0d7c2ad9ad6509861fada2d25ee2933e1fd/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala#L60-L68
and evicts the existing app info from `appInfoStore`. As a result, `kyuubi.kubernetes.terminatedApplicationRetainPeriod` does not take effect, and we can only get the `NOT_FOUND` application state.
So, we should check whether the `cleanupTerminatedAppInfoTrigger` key already exists before calling `put`; a minimal sketch of the underlying cache behavior follows.
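To illustrate why the guard is needed, a self-contained sketch of Guava's cache behavior: `put` on an existing key replaces the entry and fires the removal listener, which is what reset the retain-period trigger:
```scala
import com.google.common.cache.{CacheBuilder, RemovalListener, RemovalNotification}

object PutReplacesEntry extends App {
  val cache = CacheBuilder.newBuilder()
    .removalListener(new RemovalListener[String, String] {
      override def onRemoval(n: RemovalNotification[String, String]): Unit =
        println(s"removed key=${n.getKey} cause=${n.getCause}") // cause = REPLACED
    })
    .build[String, String]()

  cache.put("unique-key", "FAILED")
  cache.put("unique-key", "FAILED") // replaces the entry and fires the listener again
}
```
Guarding with `getIfPresent(key) == null` before `put` (as in the diff) keeps the first trigger entry, so the retain period is measured from the first terminal event.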
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [x] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4816 from turboFei/mark_if_absent.
Closes #4816
d767756fa [fwang12] put if absent
Authored-by: fwang12
Signed-off-by: Cheng Pan
---
.../kyuubi/engine/KubernetesApplicationOperation.scala | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala
index a6fe286748a..0bd3127cb98 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/KubernetesApplicationOperation.scala
@@ -199,10 +199,11 @@ class KubernetesApplicationOperation extends ApplicationOperation with Logging {
error = Option(pod.getStatus.getReason)))
}
- private def markApplicationTerminated(pod: Pod): Unit = {
- cleanupTerminatedAppInfoTrigger.put(
- pod.getMetadata.getLabels.get(LABEL_KYUUBI_UNIQUE_KEY),
- toApplicationState(pod.getStatus.getPhase))
+ private def markApplicationTerminated(pod: Pod): Unit = synchronized {
+ val key = pod.getMetadata.getLabels.get(LABEL_KYUUBI_UNIQUE_KEY)
+ if (cleanupTerminatedAppInfoTrigger.getIfPresent(key) == null) {
+ cleanupTerminatedAppInfoTrigger.put(key, toApplicationState(pod.getStatus.getPhase))
+ }
}
}
From 4c37a882d24529007d0334cae8a76f4505b4815a Mon Sep 17 00:00:00 2001
From: Fu Chen
Date: Thu, 11 May 2023 21:05:09 +0800
Subject: [PATCH 108/404] [KYUUBI #4824] Bump Jackson from 2.14.2 to 2.15.0
### _Why are the changes needed?_
spark bump jackson from 2.14.2 to 2.15.0 in https://github.com/apache/spark/pull/40933
to fix
https://github.com/apache/kyuubi/actions/runs/4943800010/jobs/8838642303
```
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 1.0 failed 1 times, most recent failure: Lost task 0.0 in stage 1.0 (TID 1) (localhost executor driver): java.lang.NoClassDefFoundError: com/fasterxml/jackson/core/StreamReadConstraints
at org.apache.spark.sql.catalyst.json.JSONOptions.buildJsonFactory(JSONOptions.scala:195)
at org.apache.spark.sql.catalyst.json.JsonInferSchema.$anonfun$infer$1(JsonInferSchema.scala:83)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:855)
at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:855)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:364)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:328)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:92)
at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:161)
at org.apache.spark.scheduler.Task.run(Task.scala:139)
at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:554)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1514)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:557)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:750)
Caused by: java.lang.ClassNotFoundException: com.fasterxml.jackson.core.StreamReadConstraints
at java.net.URLClassLoader.findClass(URLClassLoader.java:387)
at java.lang.ClassLoader.loadClass(ClassLoader.java:418)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:352)
at java.lang.ClassLoader.loadClass(ClassLoader.java:351)
... 16 more
```
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4824 from cfmcgrady/jackson-2.15.0.
Closes #4824
7a8a3de89 [Fu Chen] update dev/dependencyList
2d01b4b9d [Fu Chen] bump jackson
Authored-by: Fu Chen
Signed-off-by: liangbowen
---
dev/dependencyList | 20 ++++++++++----------
pom.xml | 2 +-
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/dev/dependencyList b/dev/dependencyList
index b216b7092e1..4af63d9cc77 100644
--- a/dev/dependencyList
+++ b/dev/dependencyList
@@ -68,16 +68,16 @@ httpclient/4.5.14//httpclient-4.5.14.jar
httpcore/4.4.16//httpcore-4.4.16.jar
httpmime/4.5.14//httpmime-4.5.14.jar
j2objc-annotations/1.3//j2objc-annotations-1.3.jar
-jackson-annotations/2.14.2//jackson-annotations-2.14.2.jar
-jackson-core/2.14.2//jackson-core-2.14.2.jar
-jackson-databind/2.14.2//jackson-databind-2.14.2.jar
-jackson-dataformat-yaml/2.14.2//jackson-dataformat-yaml-2.14.2.jar
-jackson-datatype-jdk8/2.14.2//jackson-datatype-jdk8-2.14.2.jar
-jackson-datatype-jsr310/2.14.2//jackson-datatype-jsr310-2.14.2.jar
-jackson-jaxrs-base/2.14.2//jackson-jaxrs-base-2.14.2.jar
-jackson-jaxrs-json-provider/2.14.2//jackson-jaxrs-json-provider-2.14.2.jar
-jackson-module-jaxb-annotations/2.14.2//jackson-module-jaxb-annotations-2.14.2.jar
-jackson-module-scala_2.12/2.14.2//jackson-module-scala_2.12-2.14.2.jar
+jackson-annotations/2.15.0//jackson-annotations-2.15.0.jar
+jackson-core/2.15.0//jackson-core-2.15.0.jar
+jackson-databind/2.15.0//jackson-databind-2.15.0.jar
+jackson-dataformat-yaml/2.15.0//jackson-dataformat-yaml-2.15.0.jar
+jackson-datatype-jdk8/2.15.0//jackson-datatype-jdk8-2.15.0.jar
+jackson-datatype-jsr310/2.15.0//jackson-datatype-jsr310-2.15.0.jar
+jackson-jaxrs-base/2.15.0//jackson-jaxrs-base-2.15.0.jar
+jackson-jaxrs-json-provider/2.15.0//jackson-jaxrs-json-provider-2.15.0.jar
+jackson-module-jaxb-annotations/2.15.0//jackson-module-jaxb-annotations-2.15.0.jar
+jackson-module-scala_2.12/2.15.0//jackson-module-scala_2.12-2.15.0.jar
jakarta.annotation-api/1.3.5//jakarta.annotation-api-1.3.5.jar
jakarta.inject/2.6.1//jakarta.inject-2.6.1.jar
jakarta.servlet-api/4.0.4//jakarta.servlet-api-4.0.4.jar
diff --git a/pom.xml b/pom.xml
index cff8f965627..9804fc9afe4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -157,7 +157,7 @@
4.5.144.4.161.2.0
- 2.14.2
+ 2.15.04.0.42.3.21.2.2
From ad97b0357920d3670ef8035c69449578bd8d21f4 Mon Sep 17 00:00:00 2001
From: Fu Chen
Date: Thu, 11 May 2023 22:35:52 +0800
Subject: [PATCH 109/404] [KYUUBI #4827] Bump Apache Arrow from 11.0.0 to
12.0.0
### _Why are the changes needed?_
https://arrow.apache.org/release/12.0.0.html
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [ ] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4827 from cfmcgrady/arrow-12.0.0.
Closes #4827
324a8ddbf [Fu Chen] bump arrow from 11.0.0 to 12.0.0
Authored-by: Fu Chen
Signed-off-by: Cheng Pan
---
dev/dependencyList | 8 ++++----
pom.xml | 2 +-
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/dev/dependencyList b/dev/dependencyList
index 4af63d9cc77..6e5673f9d9b 100644
--- a/dev/dependencyList
+++ b/dev/dependencyList
@@ -22,10 +22,10 @@ annotations/4.1.1.4//annotations-4.1.1.4.jar
antlr-runtime/3.5.3//antlr-runtime-3.5.3.jar
antlr4-runtime/4.9.3//antlr4-runtime-4.9.3.jar
aopalliance-repackaged/2.6.1//aopalliance-repackaged-2.6.1.jar
-arrow-format/11.0.0//arrow-format-11.0.0.jar
-arrow-memory-core/11.0.0//arrow-memory-core-11.0.0.jar
-arrow-memory-netty/11.0.0//arrow-memory-netty-11.0.0.jar
-arrow-vector/11.0.0//arrow-vector-11.0.0.jar
+arrow-format/12.0.0//arrow-format-12.0.0.jar
+arrow-memory-core/12.0.0//arrow-memory-core-12.0.0.jar
+arrow-memory-netty/12.0.0//arrow-memory-netty-12.0.0.jar
+arrow-vector/12.0.0//arrow-vector-12.0.0.jar
classgraph/4.8.138//classgraph-4.8.138.jar
commons-codec/1.15//commons-codec-1.15.jar
commons-collections/3.2.2//commons-collections-3.2.2.jar
diff --git a/pom.xml b/pom.xml
index 9804fc9afe4..ca596d7aeb3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -116,7 +116,7 @@
2.122.8.1
- 11.0.0
+ 12.0.04.9.34.3.4
From 3fc23970c6b971089a381d7b2900f02e23b2cdb7 Mon Sep 17 00:00:00 2001
From: fwang12
Date: Fri, 12 May 2023 08:30:18 +0800
Subject: [PATCH 110/404] [KYUUBI #4792] [MINOR] Enhance hardcoded session
keywords and remove unused code
### _Why are the changes needed?_
As title.
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before make a pull request
Closes #4792 from turboFei/remove_unused.
Closes #4792
fe568af7e [fwang12] server conf
97f510020 [fwang12] save
c44e70a58 [fwang12] remove unused code
Authored-by: fwang12
Signed-off-by: fwang12
---
.../kyuubi/engine/flink/session/FlinkSessionImpl.scala | 6 +++---
.../kyuubi/engine/spark/session/SparkSessionImpl.scala | 6 +++---
.../kyuubi/engine/trino/session/TrinoSessionImpl.scala | 8 ++++----
.../main/scala/org/apache/kyuubi/config/KyuubiConf.scala | 1 +
.../main/scala/org/apache/kyuubi/session/package.scala | 2 ++
.../scala/org/apache/kyuubi/util/KyuubiHadoopUtils.scala | 9 ++-------
.../main/scala/org/apache/kyuubi/engine/EngineRef.scala | 4 +++-
.../org/apache/kyuubi/session/KyuubiSessionImpl.scala | 6 ++----
8 files changed, 20 insertions(+), 22 deletions(-)
diff --git a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala
index 09f5ac94319..10a48f1a143 100644
--- a/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala
+++ b/externals/kyuubi-flink-sql-engine/src/main/scala/org/apache/kyuubi/engine/flink/session/FlinkSessionImpl.scala
@@ -30,7 +30,7 @@ import org.apache.hive.service.rpc.thrift.{TGetInfoType, TGetInfoValue, TProtoco
import org.apache.kyuubi.KyuubiSQLException
import org.apache.kyuubi.config.KyuubiReservedKeys.KYUUBI_SESSION_HANDLE_KEY
import org.apache.kyuubi.engine.flink.FlinkEngineUtils
-import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager}
+import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager, USE_CATALOG, USE_DATABASE}
class FlinkSessionImpl(
protocol: TProtocolVersion,
@@ -62,10 +62,10 @@ class FlinkSessionImpl(
val executor = fSession.createExecutor(Configuration.fromMap(fSession.getSessionConfig))
val (useCatalogAndDatabaseConf, otherConf) = normalizedConf.partition { case (k, _) =>
- Array("use:catalog", "use:database").contains(k)
+ Array(USE_CATALOG, USE_DATABASE).contains(k)
}
- useCatalogAndDatabaseConf.get("use:catalog").foreach { catalog =>
+ useCatalogAndDatabaseConf.get(USE_CATALOG).foreach { catalog =>
try {
executor.executeStatement(OperationHandle.create, s"USE CATALOG $catalog")
} catch {
diff --git a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSessionImpl.scala b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSessionImpl.scala
index 96fc43e857d..40a0c8c7fb2 100644
--- a/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSessionImpl.scala
+++ b/externals/kyuubi-spark-sql-engine/src/main/scala/org/apache/kyuubi/engine/spark/session/SparkSessionImpl.scala
@@ -28,7 +28,7 @@ import org.apache.kyuubi.engine.spark.shim.SparkCatalogShim
import org.apache.kyuubi.engine.spark.udf.KDFRegistry
import org.apache.kyuubi.events.EventBus
import org.apache.kyuubi.operation.{Operation, OperationHandle}
-import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager}
+import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager, USE_CATALOG, USE_DATABASE}
class SparkSessionImpl(
protocol: TProtocolVersion,
@@ -56,10 +56,10 @@ class SparkSessionImpl(
override def open(): Unit = {
val (useCatalogAndDatabaseConf, otherConf) = normalizedConf.partition { case (k, _) =>
- Array("use:catalog", "use:database").contains(k)
+ Array(USE_CATALOG, USE_DATABASE).contains(k)
}
- useCatalogAndDatabaseConf.get("use:catalog").foreach { catalog =>
+ useCatalogAndDatabaseConf.get(USE_CATALOG).foreach { catalog =>
try {
SparkCatalogShim().setCurrentCatalog(spark, catalog)
} catch {
diff --git a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/session/TrinoSessionImpl.scala b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/session/TrinoSessionImpl.scala
index 1a96bed73f5..6869e54dce0 100644
--- a/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/session/TrinoSessionImpl.scala
+++ b/externals/kyuubi-trino-engine/src/main/scala/org/apache/kyuubi/engine/trino/session/TrinoSessionImpl.scala
@@ -35,7 +35,7 @@ import org.apache.kyuubi.engine.trino.{TrinoConf, TrinoContext, TrinoStatement}
import org.apache.kyuubi.engine.trino.event.TrinoSessionEvent
import org.apache.kyuubi.events.EventBus
import org.apache.kyuubi.operation.{Operation, OperationHandle}
-import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager}
+import org.apache.kyuubi.session.{AbstractSession, SessionHandle, SessionManager, USE_CATALOG, USE_DATABASE}
class TrinoSessionImpl(
protocol: TProtocolVersion,
@@ -59,12 +59,12 @@ class TrinoSessionImpl(
override def open(): Unit = {
val (useCatalogAndDatabaseConf, _) = normalizedConf.partition { case (k, _) =>
- Array("use:catalog", "use:database").contains(k)
+ Array(USE_CATALOG, USE_DATABASE).contains(k)
}
useCatalogAndDatabaseConf.foreach {
- case ("use:catalog", catalog) => catalogName = catalog
- case ("use:database", database) => databaseName = database
+ case (USE_CATALOG, catalog) => catalogName = catalog
+ case (USE_DATABASE, database) => databaseName = database
}
val httpClient = new OkHttpClient.Builder().build()
diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
index 8da336102ce..9ae1898c5fa 100644
--- a/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
+++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/config/KyuubiConf.scala
@@ -1863,6 +1863,7 @@ object KyuubiConf {
.doc("This parameter is introduced as a server-side parameter " +
"controlling the upper limit of the engine pool.")
.version("1.4.0")
+ .serverOnly
.intConf
.checkValue(s => s > 0 && s < 33, "Invalid engine pool threshold, it should be in [1, 32]")
.createWithDefault(9)
diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/session/package.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/session/package.scala
index 40abded985c..63b17dd4d2e 100644
--- a/kyuubi-common/src/main/scala/org/apache/kyuubi/session/package.scala
+++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/session/package.scala
@@ -25,6 +25,8 @@ package object session {
val HIVECONF_PREFIX = "hiveconf:"
val HIVEVAR_PREFIX = "hivevar:"
val METACONF_PREFIX = "metaconf:"
+ val USE_CATALOG = "use:catalog"
+ val USE_DATABASE = "use:database"
val SPARK_PREFIX = "spark."
}
diff --git a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/KyuubiHadoopUtils.scala b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/KyuubiHadoopUtils.scala
index a63646d9bd2..28806e915e2 100644
--- a/kyuubi-common/src/main/scala/org/apache/kyuubi/util/KyuubiHadoopUtils.scala
+++ b/kyuubi-common/src/main/scala/org/apache/kyuubi/util/KyuubiHadoopUtils.scala
@@ -26,7 +26,7 @@ import scala.util.{Failure, Success, Try}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
import org.apache.hadoop.io.Text
-import org.apache.hadoop.security.{Credentials, SecurityUtil, UserGroupInformation}
+import org.apache.hadoop.security.{Credentials, SecurityUtil}
import org.apache.hadoop.security.token.{Token, TokenIdentifier}
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier
import org.apache.hadoop.yarn.conf.YarnConfiguration
@@ -36,12 +36,7 @@ import org.apache.kyuubi.config.KyuubiConf
object KyuubiHadoopUtils extends Logging {
- private val subjectField =
- classOf[UserGroupInformation].getDeclaredField("subject")
- subjectField.setAccessible(true)
-
- private val tokenMapField =
- classOf[Credentials].getDeclaredField("tokenMap")
+ private val tokenMapField = classOf[Credentials].getDeclaredField("tokenMap")
tokenMapField.setAccessible(true)
def newHadoopConf(
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala
index 765f3694965..227cdd6c89c 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/engine/EngineRef.scala
@@ -43,6 +43,7 @@ import org.apache.kyuubi.metrics.MetricsConstants.{ENGINE_FAIL, ENGINE_TIMEOUT,
import org.apache.kyuubi.metrics.MetricsSystem
import org.apache.kyuubi.operation.log.OperationLog
import org.apache.kyuubi.plugin.GroupProvider
+import org.apache.kyuubi.server.KyuubiServer
/**
* The description and functionality of an engine at server side
@@ -69,7 +70,8 @@ private[kyuubi] class EngineRef(
private val engineType: EngineType = EngineType.withName(conf.get(ENGINE_TYPE))
// Server-side engine pool size threshold
- private val poolThreshold: Int = conf.get(ENGINE_POOL_SIZE_THRESHOLD)
+ private val poolThreshold: Int = Option(KyuubiServer.kyuubiServer).map(_.getConf)
+ .getOrElse(KyuubiConf()).get(ENGINE_POOL_SIZE_THRESHOLD)
private val clientPoolSize: Int = conf.get(ENGINE_POOL_SIZE)
diff --git a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionImpl.scala b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionImpl.scala
index 80df5c44dd0..8d5132ba4b1 100644
--- a/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionImpl.scala
+++ b/kyuubi-server/src/main/scala/org/apache/kyuubi/session/KyuubiSessionImpl.scala
@@ -64,11 +64,9 @@ class KyuubiSessionImpl(
}
}
- // TODO: needs improve the hardcode
optimizedConf.foreach {
- case ("use:catalog", _) =>
- case ("use:database", _) =>
- case ("kyuubi.engine.pool.size.threshold", _) =>
+ case (USE_CATALOG, _) =>
+ case (USE_DATABASE, _) =>
case (key, value) => sessionConf.set(key, value)
}
From 1e310a0818f23dd7b8f2d4afd17795794fed46ea Mon Sep 17 00:00:00 2001
From: Cheng Pan
Date: Fri, 12 May 2023 08:38:26 +0800
Subject: [PATCH 111/404] [KYUUBI #4828] [BUILD] Exclude macOS tar extended
metadata in build/dist
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
### _Why are the changes needed?_
Add args `--no-mac-metadata --no-xattrs --no-fflags` to `tar` on macOS in `build/dist` to exclude macOS-specific extended metadata.
The binary tarball created on macOS includes extended macOS-specific metadata and xattrs, which causes warnings when unarchiving it on Linux.
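A sketch of how such a conditional could look in `build/dist` (variable names here are illustrative, not the script's actual ones):
```
# bsdtar on macOS understands these flags; GNU tar on Linux does not need them
TAR_EXTRA_OPTS=""
if [ "$(uname -s)" = "Darwin" ]; then
  TAR_EXTRA_OPTS="--no-mac-metadata --no-xattrs --no-fflags"
fi
tar czf "$TARBALL_NAME.tgz" $TAR_EXTRA_OPTS "$DIST_NAME"
```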
Steps to reproduce:
1. create tarball on macOS (13.3.1)
```
➜ apache-kyuubi git:(master) tar --version
bsdtar 3.5.3 - libarchive 3.5.3 zlib/1.2.11 liblzma/5.0.5 bz2lib/1.0.8
```
```
➜ apache-kyuubi git:(master) build/dist --tgz
```
2. unarchive the binary tarball on Linux (CentOS-7)
```
➜ ~ tar --version
tar (GNU tar) 1.26
Copyright (C) 2011 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later.
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Written by John Gilmore and Jay Fenlason.
```
```
➜ ~ tar -xzf apache-kyuubi-1.8.0-SNAPSHOT-bin.tgz
tar: Ignoring unknown extended header keyword `SCHILY.fflags'
tar: Ignoring unknown extended header keyword `LIBARCHIVE.xattr.com.apple.FinderInfo'
```
### _How was this patch tested?_
- [x] Manual tests
Create binary tarball on macOS then unarchive on Linux, warnings disappear after this change.
Closes #4828 from pan3793/dist.
Closes #4828
7bc49d847 [Cheng Pan] [BUILD] Exclude macOS tar extended metadata in build/dist
Authored-by: Cheng Pan
Signed-off-by: liangbowen