Skip to content

Commit

Permalink
Merge branch 'dev' into fix/switch_js
Browse files Browse the repository at this point in the history
  • Loading branch information
rickchengx committed Jan 29, 2024
2 parents e90b6cb + edbf5cd commit 301c70e
Show file tree
Hide file tree
Showing 97 changed files with 824 additions and 395 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/backend.yml
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ jobs:
fail-fast: false
matrix:
db: ["mysql", "postgresql"]
version: ["2.0.9", "3.0.6", "3.1.8"]
version: ["2.0.9", "3.0.6", "3.1.8", "3.2.0"]
steps:
- name: Set up JDK 8
uses: actions/setup-java@v2
Expand All @@ -171,7 +171,7 @@ jobs:
echo "DATABASE_VERSION=${VERSION//\./}" >> $GITHUB_ENV
- name: Prepare
run: |
wget https://dlcdn.apache.org/dolphinscheduler/${{ matrix.version }}/apache-dolphinscheduler-${{ matrix.version }}-bin.tar.gz -P dolphinscheduler/${{ matrix.version }}
wget https://archive.apache.org/dist/dolphinscheduler/${{ matrix.version }}/apache-dolphinscheduler-${{ matrix.version }}-bin.tar.gz -P dolphinscheduler/${{ matrix.version }}
tar -xzf dolphinscheduler/${{ matrix.version }}/apache-dolphinscheduler-${{ matrix.version }}-bin.tar.gz -C dolphinscheduler/${{ matrix.version }} --strip-components 1
tar -xzf dolphinscheduler/dev/apache-dolphinscheduler-*-bin.tar.gz -C dolphinscheduler/dev --strip-components 1
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ Welcome to join the Apache DolphinScheduler community by:

- Join the [DolphinScheduler Slack](https://s.apache.org/dolphinscheduler-slack) to keep in touch with the community
- Follow the [DolphinScheduler Twitter](https://twitter.com/dolphinschedule) and get the latest news <!-- markdown-link-check-disable-line -->
- Subscribe DolphinScheduler mail list, users@dolphinscheduler.apache.org for user and dev@dolphinscheduler.apache.org for developer
- Subscribe to the DolphinScheduler mailing lists: [users@dolphinscheduler.apache.org](mailto:users-subscribe@dolphinscheduler.apache.org) for users and [dev@dolphinscheduler.apache.org](mailto:dev-subscribe@dolphinscheduler.apache.org) for developers

# Landscapes

Expand Down
2 changes: 1 addition & 1 deletion README_zh_CN.md
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ DolphinScheduler 的主要特性如下:

- 加入 [DolphinScheduler Slack](https://s.apache.org/dolphinscheduler-slack)
- 关注 [DolphinScheduler Twitter](https://twitter.com/dolphinschedule) 来获取最新消息 <!-- markdown-link-check-disable-line -->
- 订阅 DolphinScheduler 邮件列表, 用户订阅 users@dolphinscheduler.apache.org 开发者请订阅 dev@dolphinscheduler.apache.org
- 订阅 DolphinScheduler 邮件列表, 用户订阅 [users@dolphinscheduler.apache.org](mailto:users-subscribe@dolphinscheduler.apache.org) 开发者请订阅 [dev@dolphinscheduler.apache.org](mailto:dev-subscribe@dolphinscheduler.apache.org)

# Landscapes

Expand Down
2 changes: 2 additions & 0 deletions docs/docs/en/guide/installation/kubernetes.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@ Kubernetes deployment is DolphinScheduler deployment in a Kubernetes cluster, wh

If you are a new hand and want to experience DolphinScheduler functions, we recommend you install follow [Standalone deployment](standalone.md). If you want to experience more complete functions and schedule massive tasks, we recommend you install follow [pseudo-cluster deployment](pseudo-cluster.md). If you want to deploy DolphinScheduler in production, we recommend you follow [cluster deployment](cluster.md) or [Kubernetes deployment](kubernetes.md).

> **Tip**: You can also try the [DolphinScheduler K8S Operator](https://github.com/apache/dolphinscheduler-operator), which is currently in the alpha1 stage.

## Prerequisites

- [Helm](https://helm.sh/) version 3.1.0+
Expand Down
2 changes: 1 addition & 1 deletion docs/docs/en/guide/task/flink.md
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ Flink task type, used to execute Flink programs. For Flink nodes:
| Parallelism | Used to set the degree of parallelism for executing Flink tasks. |
| Yarn queue | Used to set the yarn queue, use `default` queue by default. |
| Main program parameters | Set the input parameters for the Flink program and support the substitution of custom parameter variables. |
| Optional parameters | Support `--jar`, `--files`,` --archives`, `--conf` format. |
| Optional parameters | Set the Flink command options, such as `-D`, `-C`, `-yt`. |
| Custom parameter | It is a local user-defined parameter for Flink, and will replace the content with `${variable}` in the script. |

## Task Example
Expand Down
2 changes: 1 addition & 1 deletion docs/docs/en/guide/task/spark.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ Spark task type for executing Spark application. When executing the Spark task,
| Executor memory size | Set the size of Executor memories, which can be set according to the actual production environment. |
| Yarn queue | Set the yarn queue, use `default` queue by default. |
| Main program parameters | Set the input parameters of the Spark program and support the substitution of custom parameter variables. |
| Optional parameters | Support `--jars`, `--files`,` --archives`, `--conf` format. |
| Optional parameters | Set the Spark command options, such as `--jars`, `--files`, `--archives`, `--conf`. |
| Resource | Appoint resource files in the `Resource` if parameters refer to them. |
| Custom parameter | It is a local user-defined parameter for Spark, and will replace the content with `${variable}` in the script. |
| Predecessor task | Selecting a predecessor task for the current task, will set the selected predecessor task as upstream of the current task. |
Expand Down
2 changes: 2 additions & 0 deletions docs/docs/zh/guide/installation/kubernetes.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@ Kubernetes 部署目的是在 Kubernetes 集群中部署 DolphinScheduler 服务

如果你是新手,想要体验 DolphinScheduler 的功能,推荐使用[Standalone](standalone.md)方式体检。如果你想体验更完整的功能,或者更大的任务量,推荐使用[伪集群部署](pseudo-cluster.md)。如果你是在生产中使用,推荐使用[集群部署](cluster.md)或者[kubernetes](kubernetes.md)

> **提示**: 您也可以尝试使用 [DolphinScheduler K8S Operator](https://github.com/apache/dolphinscheduler-operator),目前处于 alpha1 阶段。

## 先决条件

- [Helm](https://helm.sh/) 3.1.0+
Expand Down
2 changes: 1 addition & 1 deletion docs/docs/zh/guide/task/flink.md
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ Flink 任务类型,用于执行 Flink 程序。对于 Flink 节点:
| 并行度 | 用于设置执行 Flink 任务的并行度 |
| Yarn 队列 | 用于设置 Yarn 队列,默认使用 default 队列 |
| 主程序参数 | 设置 Flink 程序的输入参数,支持自定义参数变量的替换 |
| 选项参数 | 支持 `--jar``--files``--archives``--conf` 格式 |
| 选项参数 | 设置 Flink 命令的选项参数,例如 `-D`、`-C`、`-yt` |
| 自定义参数 | 是 Flink 局部的用户自定义参数,会替换脚本中以 ${变量} 的内容 |

## 任务样例
Expand Down
2 changes: 1 addition & 1 deletion docs/docs/zh/guide/task/spark.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ Spark 任务类型用于执行 Spark 应用。对于 Spark 节点,worker 支
- Executor 内存数:用于设置 Executor 内存数,可根据实际生产环境设置对应的内存数。
- Yarn 队列:用于设置 Yarn 队列,默认使用 default 队列。
- 主程序参数:设置 Spark 程序的输入参数,支持自定义参数变量的替换。
- 选项参数:支持 `--jars``--files``--archives``--conf` 格式
- 选项参数:设置 Spark 命令的选项参数,例如 `--jars`、`--files`、`--archives`、`--conf`。
- 资源:如果其他参数中引用了资源文件,需要在资源中选择指定。
- 自定义参数:是 Spark 局部的用户自定义参数,会替换脚本中以 ${变量} 的内容。

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,15 @@

package org.apache.dolphinscheduler.alert;

import org.apache.dolphinscheduler.alert.metrics.AlertServerMetrics;
import org.apache.dolphinscheduler.alert.plugin.AlertPluginManager;
import org.apache.dolphinscheduler.alert.registry.AlertRegistryClient;
import org.apache.dolphinscheduler.alert.rpc.AlertRpcServer;
import org.apache.dolphinscheduler.alert.service.AlertBootstrapService;
import org.apache.dolphinscheduler.alert.service.ListenerEventPostService;
import org.apache.dolphinscheduler.common.constants.Constants;
import org.apache.dolphinscheduler.common.lifecycle.ServerLifeCycleManager;
import org.apache.dolphinscheduler.common.thread.DefaultUncaughtExceptionHandler;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;

import javax.annotation.PreDestroy;
Expand Down Expand Up @@ -54,6 +56,8 @@ public class AlertServer {
private AlertRegistryClient alertRegistryClient;

// Entry point for the alert server.
public static void main(String[] args) {
// Expose the running count of uncaught exceptions as a gauge metric.
AlertServerMetrics.registerUncachedException(DefaultUncaughtExceptionHandler::getUncaughtExceptionCount);
// Install the shared default handler so uncaught exceptions on any thread are counted.
Thread.setDefaultUncaughtExceptionHandler(DefaultUncaughtExceptionHandler.getInstance());
Thread.currentThread().setName(Constants.THREAD_NAME_ALERT_SERVER);
new SpringApplicationBuilder(AlertServer.class).run(args);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,12 @@ public void registerPendingAlertGauge(final Supplier<Number> supplier) {
.register(Metrics.globalRegistry);
}

/**
 * Registers a gauge that reports the current number of uncaught exceptions.
 *
 * <p>NOTE(review): "uncached" in the method and metric name appears to be a typo for
 * "uncaught" (the supplier is the uncaught-exception count); both are kept unchanged
 * for backward compatibility, only the human-readable description is corrected.
 *
 * @param supplier supplies the current uncaught-exception count
 */
public static void registerUncachedException(final Supplier<Number> supplier) {
    Gauge.builder("ds.alert.uncached.exception", supplier)
            .description("number of uncaught exceptions")
            .register(Metrics.globalRegistry);
}

// Increments the counter of successfully handled alerts.
public void incAlertSuccessCount() {
alertSuccessCounter.increment();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,9 @@

package org.apache.dolphinscheduler.api;

import org.apache.dolphinscheduler.api.metrics.ApiServerMetrics;
import org.apache.dolphinscheduler.common.enums.PluginType;
import org.apache.dolphinscheduler.common.thread.DefaultUncaughtExceptionHandler;
import org.apache.dolphinscheduler.dao.PluginDao;
import org.apache.dolphinscheduler.dao.entity.PluginDefine;
import org.apache.dolphinscheduler.plugin.task.api.TaskChannelFactory;
Expand Down Expand Up @@ -51,6 +53,8 @@ public class ApiApplicationServer {
private PluginDao pluginDao;

// Entry point for the API application server.
public static void main(String[] args) {
// Expose the running count of uncaught exceptions as a gauge metric.
ApiServerMetrics.registerUncachedException(DefaultUncaughtExceptionHandler::getUncaughtExceptionCount);
// Install the shared default handler so uncaught exceptions on any thread are counted.
Thread.setDefaultUncaughtExceptionHandler(DefaultUncaughtExceptionHandler.getInstance());
SpringApplication.run(ApiApplicationServer.class);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ public class AlertGroupController extends BaseController {
* @param description description
* @return create result code
*/
@Operation(summary = "createAlertgroup", description = "CREATE_ALERT_GROUP_NOTES")
@Operation(summary = "createAlertGroup", description = "CREATE_ALERT_GROUP_NOTES")
@Parameters({
@Parameter(name = "groupName", description = "GROUP_NAME", required = true, schema = @Schema(implementation = String.class)),
@Parameter(name = "description", description = "DESC", schema = @Schema(implementation = String.class)),
Expand All @@ -100,7 +100,7 @@ public Result<AlertGroup> createAlertGroup(@Parameter(hidden = true) @RequestAtt
* @param loginUser login user
* @return alert group list
*/
@Operation(summary = "listAlertgroupById", description = "QUERY_ALERT_GROUP_LIST_NOTES")
@Operation(summary = "listAlertGroupById", description = "QUERY_ALERT_GROUP_LIST_NOTES")
@GetMapping(value = "/list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_ALL_ALERTGROUP_ERROR)
Expand All @@ -116,7 +116,7 @@ public Result<List<AlertGroup>> list(@Parameter(hidden = true) @RequestAttribute
* @param loginUser login user
* @return normal alert group list
*/
@Operation(summary = "listNormalAlertgroupById", description = "QUERY_ALERT_GROUP_LIST_NOTES")
@Operation(summary = "listNormalAlertGroupById", description = "QUERY_ALERT_GROUP_LIST_NOTES")
@GetMapping(value = "/normal-list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_ALL_ALERTGROUP_ERROR)
Expand Down Expand Up @@ -155,7 +155,7 @@ public Result<PageInfo<AlertGroup>> listPaging(@Parameter(hidden = true) @Reques
}

/**
* check alarm group detail by Id
* check alarm group detail by id
*
* @param loginUser login user
* @param id alert group id
Expand Down Expand Up @@ -185,7 +185,7 @@ public Result<AlertGroup> queryAlertGroupById(@Parameter(hidden = true) @Request
* @param description description
* @return update result code
*/
@Operation(summary = "updateAlertgroup", description = "UPDATE_ALERT_GROUP_NOTES")
@Operation(summary = "updateAlertGroup", description = "UPDATE_ALERT_GROUP_NOTES")
@Parameters({
@Parameter(name = "id", description = "ALERT_GROUP_ID", required = true, schema = @Schema(implementation = int.class, example = "100")),
@Parameter(name = "groupName", description = "GROUP_NAME", required = true, schema = @Schema(implementation = String.class)),
Expand All @@ -212,7 +212,7 @@ public Result<AlertGroup> updateAlertGroupById(@Parameter(hidden = true) @Reques
* @param id alert group id
* @return delete result code
*/
@Operation(summary = "delAlertgroupById", description = "DELETE_ALERT_GROUP_BY_ID_NOTES")
@Operation(summary = "delAlertGroupById", description = "DELETE_ALERT_GROUP_BY_ID_NOTES")
@Parameters({
@Parameter(name = "id", description = "ALERT_GROUP_ID", required = true, schema = @Schema(implementation = int.class, example = "100"))
})
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ public Result success(Object list) {

/**
* return the data use Map format, for example, passing the value of key, value, passing a value
* eg. "/user/add" then return user name: zhangsan
* e.g. "/user/add" then return username: zhangsan
*
* @param msg message
* @param object success object data
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,10 +56,10 @@ public class CloudController extends BaseController {
private CloudService cloudService;

/**
* get datafactory list
* get data factory list
*
* @param loginUser login user
* @return datafactory name list
* @return data factory name list
*/
@Operation(summary = "listDataFactory", description = "LIST_DATA_FACTORY")
@GetMapping(value = "/azure/datafactory/factories")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ public class ClusterController extends BaseController {
@PostMapping(value = "/create")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_CLUSTER_ERROR)
public Result<Long> createProject(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
public Result<Long> createCluster(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("name") String name,
@RequestParam("config") String config,
@RequestParam(value = "description", required = false) String description) {
Expand Down
Loading

0 comments on commit 301c70e

Please sign in to comment.