diff --git a/src/graph/executor/admin/SubmitJobExecutor.cpp b/src/graph/executor/admin/SubmitJobExecutor.cpp index 8353783b598..592acdc0f0f 100644 --- a/src/graph/executor/admin/SubmitJobExecutor.cpp +++ b/src/graph/executor/admin/SubmitJobExecutor.cpp @@ -35,87 +35,97 @@ folly::Future SubmitJobExecutor::execute() { LOG(ERROR) << resp.status().toString(); return std::move(resp).status(); } - switch (jobOp) { - case meta::cpp2::AdminJobOp::ADD: { - nebula::DataSet v({"New Job Id"}); - DCHECK(resp.value().job_id_ref().has_value()); - if (!resp.value().job_id_ref().has_value()) { - return Status::Error("Response unexpected."); - } - v.emplace_back(nebula::Row({*resp.value().job_id_ref()})); - return finish(std::move(v)); - } - case meta::cpp2::AdminJobOp::RECOVER: { - nebula::DataSet v({"Recovered job num"}); - DCHECK(resp.value().recovered_job_num_ref().has_value()); - if (!resp.value().recovered_job_num_ref().has_value()) { - return Status::Error("Response unexpected."); - } - v.emplace_back(nebula::Row({*resp.value().recovered_job_num_ref()})); - return finish(std::move(v)); - } - case meta::cpp2::AdminJobOp::SHOW: { - nebula::DataSet v( - {"Job Id(TaskId)", "Command(Dest)", "Status", "Start Time", "Stop Time"}); - DCHECK(resp.value().job_desc_ref().has_value()); - if (!resp.value().job_desc_ref().has_value()) { - return Status::Error("Response unexpected."); - } - DCHECK(resp.value().task_desc_ref().has_value()); - if (!resp.value().task_desc_ref().has_value()) { - return Status::Error("Response unexpected"); - } - auto &jobDesc = *resp.value().job_desc_ref(); - // job desc - v.emplace_back(nebula::Row({ - jobDesc.front().get_id(), - apache::thrift::util::enumNameSafe(jobDesc.front().get_cmd()), - apache::thrift::util::enumNameSafe(jobDesc.front().get_status()), - time::TimeConversion::unixSecondsToDateTime(jobDesc.front().get_start_time()), - time::TimeConversion::unixSecondsToDateTime(jobDesc.front().get_stop_time()), - })); - // tasks desc - auto &tasksDesc = 
*resp.value().get_task_desc(); - for (const auto &taskDesc : tasksDesc) { - v.emplace_back(nebula::Row({ - taskDesc.get_task_id(), - taskDesc.get_host().host, - apache::thrift::util::enumNameSafe(taskDesc.get_status()), - time::TimeConversion::unixSecondsToDateTime(taskDesc.get_start_time()), - time::TimeConversion::unixSecondsToDateTime(taskDesc.get_stop_time()), - })); - } - return finish(std::move(v)); - } - case meta::cpp2::AdminJobOp::SHOW_All: { - nebula::DataSet v({"Job Id", "Command", "Status", "Start Time", "Stop Time"}); - DCHECK(resp.value().job_desc_ref().has_value()); - if (!resp.value().job_desc_ref().has_value()) { - return Status::Error("Response unexpected"); - } - const auto &jobsDesc = *resp.value().job_desc_ref(); - for (const auto &jobDesc : jobsDesc) { - v.emplace_back(nebula::Row({ - jobDesc.get_id(), - apache::thrift::util::enumNameSafe(jobDesc.get_cmd()), - apache::thrift::util::enumNameSafe(jobDesc.get_status()), - time::TimeConversion::unixSecondsToDateTime(jobDesc.get_start_time()), - time::TimeConversion::unixSecondsToDateTime(jobDesc.get_stop_time()), - })); - } - return finish(std::move(v)); - } - case meta::cpp2::AdminJobOp::STOP: { - nebula::DataSet v({"Result"}); - v.emplace_back(nebula::Row({"Job stopped"})); - return finish(std::move(v)); - } - // no default so the compiler will warning when lack - } - DLOG(FATAL) << "Unknown job operation " << static_cast(jobOp); - return Status::Error("Unknown job job operation %d.", static_cast(jobOp)); + auto status = buildResult(jobOp, std::move(resp).value()); + NG_RETURN_IF_ERROR(status); + return finish(std::move(status).value()); }); } +StatusOr SubmitJobExecutor::buildResult(meta::cpp2::AdminJobOp jobOp, + meta::cpp2::AdminJobResult &&resp) { + switch (jobOp) { + case meta::cpp2::AdminJobOp::ADD: { + nebula::DataSet v({"New Job Id"}); + DCHECK(resp.job_id_ref().has_value()); + if (!resp.job_id_ref().has_value()) { + return Status::Error("Response unexpected."); + } + 
v.emplace_back(nebula::Row({*resp.job_id_ref()})); + return v; + } + case meta::cpp2::AdminJobOp::RECOVER: { + nebula::DataSet v({"Recovered job num"}); + DCHECK(resp.recovered_job_num_ref().has_value()); + if (!resp.recovered_job_num_ref().has_value()) { + return Status::Error("Response unexpected."); + } + v.emplace_back(nebula::Row({*resp.recovered_job_num_ref()})); + return v; + } + case meta::cpp2::AdminJobOp::SHOW: { + nebula::DataSet v({"Job Id(TaskId)", "Command(Dest)", "Status", "Start Time", "Stop Time"}); + DCHECK(resp.job_desc_ref().has_value()); + if (!resp.job_desc_ref().has_value()) { + return Status::Error("Response unexpected."); + } + DCHECK(resp.task_desc_ref().has_value()); + if (!resp.task_desc_ref().has_value()) { + return Status::Error("Response unexpected"); + } + auto &jobDesc = *resp.job_desc_ref(); + // job desc + v.emplace_back(nebula::Row({ + jobDesc.front().get_id(), + apache::thrift::util::enumNameSafe(jobDesc.front().get_cmd()), + apache::thrift::util::enumNameSafe(jobDesc.front().get_status()), + convertJobTimestampToDateTime(jobDesc.front().get_start_time()), + convertJobTimestampToDateTime(jobDesc.front().get_stop_time()), + })); + // tasks desc + auto &tasksDesc = *resp.get_task_desc(); + for (const auto &taskDesc : tasksDesc) { + v.emplace_back(nebula::Row({ + taskDesc.get_task_id(), + taskDesc.get_host().host, + apache::thrift::util::enumNameSafe(taskDesc.get_status()), + convertJobTimestampToDateTime(taskDesc.get_start_time()), + convertJobTimestampToDateTime(taskDesc.get_stop_time()), + })); + } + return v; + } + case meta::cpp2::AdminJobOp::SHOW_All: { + nebula::DataSet v({"Job Id", "Command", "Status", "Start Time", "Stop Time"}); + DCHECK(resp.job_desc_ref().has_value()); + if (!resp.job_desc_ref().has_value()) { + return Status::Error("Response unexpected"); + } + const auto &jobsDesc = *resp.job_desc_ref(); + for (const auto &jobDesc : jobsDesc) { + v.emplace_back(nebula::Row({ + jobDesc.get_id(), + 
apache::thrift::util::enumNameSafe(jobDesc.get_cmd()), + apache::thrift::util::enumNameSafe(jobDesc.get_status()), + convertJobTimestampToDateTime(jobDesc.get_start_time()), + convertJobTimestampToDateTime(jobDesc.get_stop_time()), + })); + } + return v; + } + case meta::cpp2::AdminJobOp::STOP: { + nebula::DataSet v({"Result"}); + v.emplace_back(nebula::Row({"Job stopped"})); + return v; + } + // no default so the compiler will warning when lack + } + DLOG(FATAL) << "Unknown job operation " << static_cast(jobOp); + return Status::Error("Unknown job job operation %d.", static_cast(jobOp)); +} + +Value SubmitJobExecutor::convertJobTimestampToDateTime(int64_t timestamp) { + return timestamp > 0 ? Value(time::TimeConversion::unixSecondsToDateTime(timestamp)) + : Value::kEmpty; +} } // namespace graph } // namespace nebula diff --git a/src/graph/executor/admin/SubmitJobExecutor.h b/src/graph/executor/admin/SubmitJobExecutor.h index dc8da2e75bf..08f777f2629 100644 --- a/src/graph/executor/admin/SubmitJobExecutor.h +++ b/src/graph/executor/admin/SubmitJobExecutor.h @@ -18,6 +18,11 @@ class SubmitJobExecutor final : public Executor { : Executor("SubmitJobExecutor", node, qctx) {} folly::Future execute() override; + + private: + FRIEND_TEST(JobTest, JobFinishTime); + StatusOr buildResult(meta::cpp2::AdminJobOp jobOp, meta::cpp2::AdminJobResult &&resp); + Value convertJobTimestampToDateTime(int64_t timestamp); }; } // namespace graph diff --git a/src/graph/executor/test/CMakeLists.txt b/src/graph/executor/test/CMakeLists.txt index 030f6b43dd9..c0a8925a452 100644 --- a/src/graph/executor/test/CMakeLists.txt +++ b/src/graph/executor/test/CMakeLists.txt @@ -81,6 +81,7 @@ nebula_add_test( CartesianProductTest.cpp AssignTest.cpp ShowQueriesTest.cpp + JobTest.cpp OBJECTS ${EXEC_QUERY_TEST_OBJS} LIBRARIES diff --git a/src/graph/executor/test/JobTest.cpp b/src/graph/executor/test/JobTest.cpp new file mode 100644 index 00000000000..58e4c33f79b --- /dev/null +++ 
b/src/graph/executor/test/JobTest.cpp @@ -0,0 +1,69 @@ +/* Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License, + * attached with Common Clause Condition 1.0, found in the LICENSES directory. + */ + +#include + +#include "common/time/TimeUtils.h" +#include "graph/context/QueryContext.h" +#include "graph/executor/admin/SubmitJobExecutor.h" +#include "graph/planner/plan/Admin.h" + +namespace nebula { +namespace graph { +class JobTest : public testing::Test {}; + +TEST_F(JobTest, JobFinishTime) { + { + meta::cpp2::AdminJobResult resp; + resp.set_job_id(0); + meta::cpp2::JobDesc jobDesc; + jobDesc.set_id(0); + jobDesc.set_start_time(123); + jobDesc.set_stop_time(0); + resp.set_job_desc({std::move(jobDesc)}); + meta::cpp2::TaskDesc taskDesc; + taskDesc.set_start_time(456); + taskDesc.set_stop_time(0); + resp.set_task_desc({std::move(taskDesc)}); + + auto qctx = std::make_unique(); + auto submitJob = SubmitJob::make( + qctx.get(), nullptr, meta::cpp2::AdminJobOp::SHOW, meta::cpp2::AdminCmd::UNKNOWN, {}); + auto submitJobExe = std::make_unique(submitJob, qctx.get()); + + auto status = submitJobExe->buildResult(meta::cpp2::AdminJobOp::SHOW, std::move(resp)); + EXPECT_TRUE(status.ok()); + auto result = std::move(status).value(); + EXPECT_EQ(result.rows.size(), 2); + EXPECT_EQ(result.rows[0][3], Value(time::TimeConversion::unixSecondsToDateTime(123))); + EXPECT_EQ(result.rows[0][4], Value::kEmpty); + EXPECT_EQ(result.rows[1][3], Value(time::TimeConversion::unixSecondsToDateTime(456))); + EXPECT_EQ(result.rows[1][4], Value::kEmpty); + } + { + meta::cpp2::AdminJobResult resp; + resp.set_job_id(0); + meta::cpp2::JobDesc jobDesc; + jobDesc.set_id(0); + jobDesc.set_start_time(123); + jobDesc.set_stop_time(0); + resp.set_job_desc({std::move(jobDesc)}); + + auto qctx = std::make_unique(); + auto submitJob = SubmitJob::make( + qctx.get(), nullptr, meta::cpp2::AdminJobOp::SHOW_All, meta::cpp2::AdminCmd::UNKNOWN, {}); + 
auto submitJobExe = std::make_unique(submitJob, qctx.get()); + + auto status = submitJobExe->buildResult(meta::cpp2::AdminJobOp::SHOW_All, std::move(resp)); + EXPECT_TRUE(status.ok()); + auto result = std::move(status).value(); + EXPECT_EQ(result.rows.size(), 1); + EXPECT_EQ(result.rows[0][3], Value(time::TimeConversion::unixSecondsToDateTime(123))); + EXPECT_EQ(result.rows[0][4], Value::kEmpty); + } +} +} // namespace graph +} // namespace nebula diff --git a/tests/Makefile b/tests/Makefile index 10fc66d046b..36963cbf2c7 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -61,16 +61,16 @@ sess: currdir python3 -m pytest -m "not skip" -k "not tck" job/test_session.py jobs: currdir - python3 -m pytest -m "not skip" -k "not tck" job/test_jobs.py + python3 -m pytest -m "not skip" tck/steps/test_jobs.py -test: sess jobs +test: sess python3 -m pytest -n$(J) --dist=loadfile -m "not skip" -k "not tck" $(TEST_DIR) slow-query: currdir python3 -m pytest -n$(J) -m "not skip" tck/steps/test_kill_slow_query_via_same_service.py && \ python3 -m pytest -n$(J) -m "not skip" tck/steps/test_kill_slow_query_via_different_service.py -tck: slow-query +tck: jobs slow-query python3 -m pytest -n$(J) -m "not skip" tck/steps/test_tck.py fail: currdir diff --git a/tests/common/comparator.py b/tests/common/comparator.py index 3468f7616f4..02995719e59 100644 --- a/tests/common/comparator.py +++ b/tests/common/comparator.py @@ -29,11 +29,13 @@ def __init__(self, strict=True, order=False, contains=CmpType.EQUAL, + first_n_records=-1, decode_type='utf-8', vid_fn=None): self._strict = strict self._order = order self._contains = contains + self._first_n_records=first_n_records self._decode_type = decode_type self._vid_fn = vid_fn @@ -65,13 +67,16 @@ def compare(self, resp: DataSet, expect: DataSet): if ln != self.bstr(rn): return False, -2 if self._order: - for i in range(0, len(expect.rows)): - cmp = self.compare_row(resp.rows[i], expect.rows[i]) - if self._whether_return(cmp): - return False, i - 
if self._contains == CmpType.CONTAINS: + if self._contains == CmpType.CONTAINS and self._first_n_records < 0: + for i in range(0, len(expect.rows)): + cmp = self.compare_row(resp.rows[i], expect.rows[i]) + if self._whether_return(cmp): + return False, i return True, None - return len(resp.rows) == len(expect.rows), -1 + elif self._contains == CmpType.CONTAINS and self._first_n_records > 0: + return self._compare_list(resp.rows[0:self._first_n_records], expect.rows, self.compare_row) + else: + return len(resp.rows) == len(expect.rows), -1 return self._compare_list(resp.rows, expect.rows, self.compare_row, self._contains) diff --git a/tests/job/test_jobs.py b/tests/job/test_jobs.py deleted file mode 100644 index ace541646bb..00000000000 --- a/tests/job/test_jobs.py +++ /dev/null @@ -1,89 +0,0 @@ -# --coding:utf-8-- -# -# Copyright (c) 2020 vesoft inc. All rights reserved. -# -# This source code is licensed under Apache 2.0 License, -# attached with Common Clause Condition 1.0, found in the LICENSES directory. 
- -import re -import time - -from nebula2.common import ttypes -from tests.common.nebula_test_suite import NebulaTestSuite - -class TestJobs(NebulaTestSuite): - def test_failed(self): - # submit without space - resp = self.client.execute('SUBMIT JOB COMPACT;') - self.check_resp_failed(resp, ttypes.ErrorCode.E_SEMANTIC_ERROR) - # show one not exists - resp = self.client.execute('SHOW JOB 233;') - self.check_resp_failed(resp, ttypes.ErrorCode.E_EXECUTION_ERROR) - # stop one not exists - resp = self.client.execute('STOP JOB 233;') - self.check_resp_failed(resp, ttypes.ErrorCode.E_EXECUTION_ERROR) - - def test_succeeded(self): - def check_jobs_resp_obj(resp_row, job_name): - assert resp_row[1].as_string() == job_name - assert resp_row[2].is_string() - assert resp_row[3].is_datetime() - assert resp_row[4].is_datetime() - - resp = self.client.execute('CREATE SPACE IF NOT EXISTS space_for_jobs(partition_num=9, replica_factor=1, vid_type=FIXED_STRING(20));' - 'USE space_for_jobs;') - self.check_resp_succeeded(resp) - - resp = self.client.execute('SUBMIT JOB COMPACT;') - self.check_resp_succeeded(resp) - expect_col_names = ['New Job Id'] - self.check_column_names(resp, expect_col_names) - expect_values = [[re.compile(r'\d+')]] - self.check_result(resp, expect_values, is_regex=True) - time.sleep(1) - - resp = self.client.execute('SUBMIT JOB FLUSH;') - self.check_resp_succeeded(resp) - expect_col_names = ['New Job Id'] - self.check_column_names(resp, expect_col_names) - expect_values = [[re.compile(r'\d+')]] - self.check_result(resp, expect_values, is_regex=True) - time.sleep(1) - - resp = self.client.execute('SUBMIT JOB STATS;') - self.check_resp_succeeded(resp) - expect_col_names = ['New Job Id'] - self.check_column_names(resp, expect_col_names) - expect_values = [[re.compile(r'\d+')]] - self.check_result(resp, expect_values, is_regex=True) - - time.sleep(10) - resp = self.client.execute('SHOW JOBS;') - self.check_resp_succeeded(resp) - expect_col_names = ['Job Id', 
'Command', 'Status', 'Start Time', 'Stop Time'] - self.check_column_names(resp, expect_col_names) - check_jobs_resp_obj(resp.row_values(0), 'STATS') - check_jobs_resp_obj(resp.row_values(1), 'FLUSH') - check_jobs_resp_obj(resp.row_values(2), 'COMPACT') - - job_id = resp.row_values(0)[0].as_int() - resp = self.client.execute('SHOW JOB {};'.format(job_id)) - self.check_resp_succeeded(resp) - expect_col_names = ['Job Id(TaskId)', 'Command(Dest)', 'Status', 'Start Time', 'Stop Time'] - check_jobs_resp_obj(resp.row_values(0), 'STATS') - - job_id = resp.row_values(0)[0].as_int() - stop_job_resp = self.client.execute('STOP JOB {};'.format(job_id)) - if resp.row_values(0)[2].as_string() == "FINISHED": - # Executin error if the job is finished - self.check_resp_failed(stop_job_resp, ttypes.ErrorCode.E_EXECUTION_ERROR) - else: - self.check_resp_succeeded(stop_job_resp) - - # This is skipped becuase it is hard to simulate the situation - # resp = self.client.execute('RECOVER JOB;') - # self.check_resp_succeeded(resp) - # expect_col_names = ['Recovered job num'] - # self.check_column_names(resp, expect_col_names) - # expect_values = [[0]] - # self.check_result(resp, expect_values) diff --git a/tests/tck/conftest.py b/tests/tck/conftest.py index a4e59db4ca5..1a0abf392b3 100644 --- a/tests/tck/conftest.py +++ b/tests/tck/conftest.py @@ -10,8 +10,10 @@ import io import csv import re +import threading from nebula2.common.ttypes import Value, ErrorCode +from nebula2.data.DataObject import ValueWrapper from pytest_bdd import given, parsers, then, when from tests.common.dataset_printer import DataSetPrinter @@ -34,6 +36,8 @@ rparse = functools.partial(parsers.re) example_pattern = re.compile(r"<(\w+)>") +register_dict = {} +register_lock = threading.Lock() def normalize_outline_scenario(request, name): for group in example_pattern.findall(name): @@ -283,8 +287,9 @@ def cmp_dataset( order: bool, strict: bool, contains=CmpType.EQUAL, + first_n_records=-1, hashed_columns=[], -) -> None: 
+): rs = graph_spaces['result_set'] ngql = graph_spaces['ngql'] check_resp(rs, ngql) @@ -298,6 +303,7 @@ def cmp_dataset( dscmp = DataSetComparator(strict=strict, order=order, contains=contains, + first_n_records=first_n_records, decode_type=rs._decode_type, vid_fn=vid_fn) @@ -333,6 +339,7 @@ def rowp(ds, i): f"vid_fn: {vid_fn}", ] assert res, "\n".join(msg) + return rds @then(parse("define some list variables:\n{text}")) @@ -478,3 +485,31 @@ def executing_query(query, index, graph_spaces, session_from_first_conn_pool, se exec_query(request, ngql, session_from_first_conn_pool, graph_spaces) else: exec_query(request, ngql, session_from_second_conn_pool, graph_spaces) + +@then(parse("the result should be, the first {n:d} records in order, and register {column_name} as a list named {key}:\n{result}")) +def result_should_be_in_order_and_register_key(n, column_name, key, request, result, graph_spaces): + assert n > 0, f"The records number should be an positive integer: {n}" + result_ds = cmp_dataset(request, graph_spaces, result, order=True, strict=True, contains=CmpType.CONTAINS, first_n_records=n) + register_result_key(request.node.name, result_ds, column_name, key) + +def register_result_key(test_name, result_ds, column_name, key): + if column_name.encode() not in result_ds.column_names: + assert False, f"{column_name} not in result columns {result_ds.column_names}." 
+ col_index = result_ds.column_names.index(column_name.encode()) + val = [row.values[col_index] for row in result_ds.rows] + register_lock.acquire() + register_dict[test_name + key] = val; + register_lock.release() + +@when(parse("executing query, fill replace holders with element index of {indices} in {keys}:\n{query}")) +def executing_query_with_params(query, indices, keys, graph_spaces, session, request): + indices_list=[int(v) for v in indices.split(",")] + key_list=[request.node.name+key for key in keys.split(",")] + assert len(indices_list) == len(key_list), f"Length not match for keys and indices: {keys} <=> {indices}" + vals = [] + register_lock.acquire() + for (key, index) in zip (key_list, indices_list): + vals.append(ValueWrapper(register_dict[key][index])) + register_lock.release() + ngql = combine_query(query).format(*vals) + exec_query(request, ngql, session, graph_spaces) diff --git a/tests/tck/features/job/SpaceRequire.feature b/tests/tck/features/job/SpaceRequire.feature deleted file mode 100644 index dbe07b650f9..00000000000 --- a/tests/tck/features/job/SpaceRequire.feature +++ /dev/null @@ -1,29 +0,0 @@ -Feature: Submit job space requirements - - Scenario: submit job require space - Given an empty graph - When executing query: - """ - SUBMIT JOB COMPACT; - """ - Then a SemanticError should be raised at runtime: - When executing query: - """ - SUBMIT JOB FLUSH; - """ - Then a SemanticError should be raised at runtime: - When executing query: - """ - SUBMIT JOB STATS; - """ - Then a SemanticError should be raised at runtime: - When executing query: - """ - REBUILD TAG INDEX not_exists_index; - """ - Then a SemanticError should be raised at runtime: - When executing query: - """ - REBUILD EDGE INDEX not_exists_index; - """ - Then a SemanticError should be raised at runtime: diff --git a/tests/tck/job/Job.feature b/tests/tck/job/Job.feature new file mode 100644 index 00000000000..9c6796d5afc --- /dev/null +++ b/tests/tck/job/Job.feature @@ -0,0 
+1,105 @@ +# Copyright (c) 2021 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License, +# attached with Common Clause Condition 1.0, found in the LICENSES directory. +Feature: Submit job space requirements + + Scenario: submit job require space + Given an empty graph + When executing query: + """ + SUBMIT JOB COMPACT; + """ + Then a SemanticError should be raised at runtime: + When executing query: + """ + SUBMIT JOB FLUSH; + """ + Then a SemanticError should be raised at runtime: + When executing query: + """ + SUBMIT JOB STATS; + """ + Then a SemanticError should be raised at runtime: + When executing query: + """ + REBUILD TAG INDEX not_exists_index; + """ + Then a SemanticError should be raised at runtime: + When executing query: + """ + REBUILD EDGE INDEX not_exists_index; + """ + Then a SemanticError should be raised at runtime: + + Scenario: Not existed job + Given an empty graph + When executing query: + """ + SHOW JOB 123456; + """ + Then a ExecutionError should be raised at runtime: Key not existed! + When executing query: + """ + STOP JOB 123456; + """ + Then a ExecutionError should be raised at runtime: Key not existed! 
+ + Scenario: Submit and show jobs + Given create a space with following options: + | partition_num | 9 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(20) | + When executing query: + """ + SUBMIT JOB COMPACT; + """ + Then the result should be, in any order: + | New Job Id | + | /\d+/ | + And wait 1 seconds + When executing query: + """ + SUBMIT JOB FLUSH; + """ + Then the result should be, in any order: + | New Job Id | + | /\d+/ | + And wait 1 seconds + When executing query: + """ + SUBMIT JOB STATS; + """ + Then the result should be, in any order: + | New Job Id | + | /\d+/ | + And wait 10 seconds + When executing query: + """ + SHOW JOBS; + """ + Then the result should be, the first 3 records in order, and register Job Id as a list named job_id: + | Job Id | Command | Status | Start Time | Stop Time | + | /\d+/ | "STATS" | "FINISHED" | /\w+/ | /\w+/ | + | /\d+/ | "FLUSH" | "FINISHED" | /\w+/ | /\w+/ | + | /\d+/ | "COMPACT" | "FINISHED" | /\w+/ | /\w+/ | + When executing query, fill replace holders with element index of 0 in job_id: + """ + SHOW JOB {}; + """ + Then the result should be, in order: + | Job Id(TaskId) | Command(Dest) | Status | Start Time | Stop Time | + | /\d+/ | "STATS" | "FINISHED" | /\w+/ | /\w+/ | + | /\d+/ | /\w+/ | "FINISHED" | /\w+/ | /\w+/ | + When executing query, fill replace holders with element index of 0 in job_id: + """ + STOP JOB {}; + """ + Then an ExecutionError should be raised at runtime: Save job failure! + +# This is skipped because it is hard to simulate the situation +# When executing query: +# """ +# RECOVER JOB; +# """ +# Then the result should be successful diff --git a/tests/tck/steps/test_jobs.py b/tests/tck/steps/test_jobs.py new file mode 100644 index 00000000000..26a53060d95 --- /dev/null +++ b/tests/tck/steps/test_jobs.py @@ -0,0 +1,9 @@ +# Copyright (c) 2020 vesoft inc. All rights reserved.
+# +# This source code is licensed under Apache 2.0 License, +# attached with Common Clause Condition 1.0, found in the LICENSES directory. + +from pytest_bdd import scenarios + + +scenarios('job/Job.feature')