diff --git a/src/common/expression/PredicateExpression.cpp b/src/common/expression/PredicateExpression.cpp index bf133a75361..17e0442f118 100644 --- a/src/common/expression/PredicateExpression.cpp +++ b/src/common/expression/PredicateExpression.cpp @@ -20,11 +20,13 @@ const Value& PredicateExpression::evalExists(ExpressionContext& ctx) { DCHECK(collection_->kind() == Expression::Kind::kAttribute || collection_->kind() == Expression::Kind::kSubscript || collection_->kind() == Expression::Kind::kLabelTagProperty || - collection_->kind() == Expression::Kind::kTagProperty) + collection_->kind() == Expression::Kind::kTagProperty || + collection_->kind() == Expression::Kind::kConstant) << "actual kind: " << collection_->kind() << ", toString: " << toString(); if (collection_->kind() == Expression::Kind::kLabelTagProperty || - collection_->kind() == Expression::Kind::kTagProperty) { + collection_->kind() == Expression::Kind::kTagProperty || + collection_->kind() == Expression::Kind::kConstant) { auto v = collection_->eval(ctx); result_ = (!v.isNull()) && (!v.empty()); return result_; diff --git a/src/graph/executor/CMakeLists.txt b/src/graph/executor/CMakeLists.txt index 0b2f00a5936..d99fbc27edf 100644 --- a/src/graph/executor/CMakeLists.txt +++ b/src/graph/executor/CMakeLists.txt @@ -44,6 +44,7 @@ nebula_add_library( query/AppendVerticesExecutor.cpp query/RollUpApplyExecutor.cpp query/PatternApplyExecutor.cpp + query/ValueExecutor.cpp algo/BFSShortestPathExecutor.cpp algo/MultiShortestPathExecutor.cpp algo/AllPathsExecutor.cpp diff --git a/src/graph/executor/Executor.cpp b/src/graph/executor/Executor.cpp index 4656559c06d..5450722f02b 100644 --- a/src/graph/executor/Executor.cpp +++ b/src/graph/executor/Executor.cpp @@ -94,6 +94,7 @@ #include "graph/executor/query/UnionAllVersionVarExecutor.h" #include "graph/executor/query/UnionExecutor.h" #include "graph/executor/query/UnwindExecutor.h" +#include "graph/executor/query/ValueExecutor.h" #include "graph/planner/plan/Admin.h" #include "graph/planner/plan/Logic.h" #include "graph/planner/plan/Maintain.h" @@ -257,6 +258,9 @@ Executor *Executor::makeExecutor(QueryContext *qctx, const PlanNode *node) { case PlanNode::Kind::kDedup: { return pool->makeAndAdd(node, qctx); } + case PlanNode::Kind::kValue: { + return pool->makeAndAdd(node, qctx); + } case PlanNode::Kind::kAssign: { return pool->makeAndAdd(node, qctx); } diff --git a/src/graph/executor/algo/AllPathsExecutor.cpp b/src/graph/executor/algo/AllPathsExecutor.cpp index a5eceefddf6..0cf34444d80 100644 --- a/src/graph/executor/algo/AllPathsExecutor.cpp +++ b/src/graph/executor/algo/AllPathsExecutor.cpp @@ -211,7 +211,9 @@ folly::Future AllPathsExecutor::buildResult() { return future.via(runner()) .ensure([this, buildPathTime]() { addState("build_path_time", buildPathTime); }) .thenValue([this](auto&& resp) { - UNUSED(resp); + if (!resp.ok()) { + return folly::makeFuture(std::move(resp)); + } if (!withProp_ || emptyPropVids_.empty()) { finish(ResultBuilder().value(Value(std::move(result_))).build()); return folly::makeFuture(Status::OK()); @@ -222,18 +224,37 @@ folly::Future AllPathsExecutor::buildResult() { folly::Future AllPathsExecutor::buildPathMultiJobs() { std::vector>> futures; - futures.emplace_back( - folly::via(runner(), [this]() { return doBuildPath(1, 0, 0, nullptr, false); })); - futures.emplace_back( - folly::via(runner(), [this]() { return doBuildPath(1, 0, 0, nullptr, true); })); + auto leftFuture = folly::via(runner(), [this]() { return doBuildPath(1, 0, 0, nullptr, false); }) + 
.thenError(folly::tag_t{}, + [this](const std::bad_alloc&) { + memoryExceeded_ = true; + return std::vector(); + }) + .thenError(folly::tag_t{}, + [](const std::exception&) { return std::vector(); }); + auto rightFuture = folly::via(runner(), [this]() { return doBuildPath(1, 0, 0, nullptr, true); }) + .thenError(folly::tag_t{}, + [this](const std::bad_alloc&) { + memoryExceeded_ = true; + return std::vector(); + }) + .thenError(folly::tag_t{}, + [](const std::exception&) { return std::vector(); }); + futures.emplace_back(std::move(leftFuture)); + futures.emplace_back(std::move(rightFuture)); time::Duration conjunctPathTime; - return folly::collect(futures) + return folly::collectAll(futures) .via(runner()) - .thenValue([this](std::vector>&& paths) { + .thenValue([this](std::vector>>&& paths) { + if (memoryExceeded_.load(std::memory_order_acquire) == true) { + return folly::makeFuture(Executor::memoryExceededStatus()); + } memory::MemoryCheckGuard guard; - auto& leftPaths = paths.front(); - auto& rightPaths = paths.back(); + auto& leftPathsValues = paths.front(); + auto& rightPathsValues = paths.back(); + auto leftPaths = std::move(leftPathsValues).value(); + auto rightPaths = std::move(rightPathsValues).value(); if (leftSteps_ == 0) { buildOneWayPath(rightPaths, false); @@ -259,6 +280,7 @@ folly::Future> AllPathsExecutor::doBuildPa size_t end, std::shared_ptr> pathsPtr, bool reverse) { + memory::MemoryCheckGuard guard; auto maxStep = reverse ? rightSteps_ : leftSteps_; if (step > maxStep) { return folly::makeFuture>(std::vector()); @@ -272,6 +294,9 @@ folly::Future> AllPathsExecutor::doBuildPa if (step == 1) { auto& initVids = reverse ? rightInitVids_ : leftInitVids_; for (auto& vid : initVids) { + if (memoryExceeded_.load(std::memory_order_acquire) == true) { + return folly::makeFuture>(std::vector()); + } auto vidIter = adjList.find(vid); if (vidIter == adjList.end()) { continue; @@ -288,6 +313,9 @@ folly::Future> AllPathsExecutor::doBuildPa } } else { for (auto i = start; i < end; ++i) { + if (memoryExceeded_.load(std::memory_order_acquire) == true) { + return folly::makeFuture>(std::vector()); + } auto path = (*pathsPtr)[i]; auto& edgeValue = path->edge; DCHECK(edgeValue.isEdge()); @@ -314,28 +342,46 @@ folly::Future> AllPathsExecutor::doBuildPa } auto newPathsSize = newPathsPtr->size(); - if (newPathsSize == 0) { + if (newPathsSize == 0 || memoryExceeded_.load(std::memory_order_acquire) == true) { return folly::makeFuture>(std::vector()); } std::vector>> futures; if (newPathsSize < FLAGS_path_batch_size) { - futures.emplace_back(folly::via(runner(), [this, step, newPathsSize, newPathsPtr, reverse]() { - return doBuildPath(step + 1, 0, newPathsSize, newPathsPtr, reverse); - })); + auto future = folly::via(runner(), + [this, step, newPathsSize, newPathsPtr, reverse]() { + return doBuildPath(step + 1, 0, newPathsSize, newPathsPtr, reverse); + }) + .thenError(folly::tag_t{}, + [this](const std::bad_alloc&) { + memoryExceeded_ = true; + return std::vector(); + }) + .thenError(folly::tag_t{}, + [](const std::exception&) { return std::vector(); }); + futures.emplace_back(std::move(future)); } else { for (size_t _start = 0; _start < newPathsSize; _start += FLAGS_path_batch_size) { auto tmp = _start + FLAGS_path_batch_size; auto _end = tmp > newPathsSize ? 
newPathsSize : tmp; - futures.emplace_back(folly::via(runner(), [this, step, _start, _end, newPathsPtr, reverse]() { - return doBuildPath(step + 1, _start, _end, newPathsPtr, reverse); - })); + auto future = folly::via(runner(), + [this, step, _start, _end, newPathsPtr, reverse]() { + return doBuildPath(step + 1, _start, _end, newPathsPtr, reverse); + }) + .thenError(folly::tag_t{}, + [this](const std::bad_alloc&) { + memoryExceeded_ = true; + return std::vector(); + }) + .thenError(folly::tag_t{}, + [](const std::exception&) { return std::vector(); }); + futures.emplace_back(std::move(future)); } } - return folly::collect(futures).via(runner()).thenValue( - [currentStepResult = newPathsPtr](std::vector>&& paths) { - memory::MemoryCheckGuard guard; + return folly::collectAll(futures).via(runner()).thenValue( + [currentStepResult = newPathsPtr](std::vector>>&& paths) { std::vector result = std::move(*currentStepResult); - for (auto& path : paths) { + for (auto& pathValue : paths) { + auto path = std::move(pathValue).value(); if (path.empty()) { continue; } @@ -485,8 +531,9 @@ folly::Future AllPathsExecutor::conjunctPath(std::vector& leftPa runner(), [this, start, end, reverse]() { return probe(start, end, reverse); })); } } - return folly::collect(futures).via(runner()).thenValue( - [this, path = std::move(oneWayPath)](std::vector>&& resps) { + return folly::collect(futures) + .via(runner()) + .thenValue([this, path = std::move(oneWayPath)](std::vector>&& resps) { memory::MemoryCheckGuard guard; result_.rows = std::move(path); for (auto& rows : resps) { @@ -510,6 +557,14 @@ folly::Future AllPathsExecutor::conjunctPath(std::vector& leftPa result_.rows.resize(limit_); } return Status::OK(); + }) + .thenError(folly::tag_t{}, + [this](const std::bad_alloc&) { + memoryExceeded_ = true; + return folly::makeFuture(Executor::memoryExceededStatus()); + }) + .thenError(folly::tag_t{}, [](const std::exception& e) { + return folly::makeFuture(std::runtime_error(e.what())); }); } @@ -528,6 +583,7 @@ void AllPathsExecutor::buildHashTable(std::vector& paths, bool reverse) } std::vector AllPathsExecutor::probe(size_t start, size_t end, bool reverse) { + memory::MemoryCheckGuard guard; auto buildPath = [](std::vector& leftPath, const Value& intersectVertex, std::vector& rightPath) { @@ -545,6 +601,9 @@ std::vector AllPathsExecutor::probe(size_t start, size_t end, bool reverse) std::vector result; Row emptyPropVerticesRow; for (size_t i = start; i < end; ++i) { + if (memoryExceeded_.load(std::memory_order_acquire) == true) { + break; + } auto& probePath = probePaths_[i]; auto& edgeVal = probePath->edge; const auto& intersectVid = edgeVal.getEdge().dst; diff --git a/src/graph/executor/algo/AllPathsExecutor.h b/src/graph/executor/algo/AllPathsExecutor.h index 0d4417764d3..0056bb0cf4d 100644 --- a/src/graph/executor/algo/AllPathsExecutor.h +++ b/src/graph/executor/algo/AllPathsExecutor.h @@ -110,6 +110,7 @@ class AllPathsExecutor final : public StorageAccessExecutor { bool noLoop_{false}; size_t limit_{std::numeric_limits::max()}; std::atomic cnt_{0}; + std::atomic memoryExceeded_{false}; size_t maxStep_{0}; size_t leftSteps_{0}; diff --git a/src/graph/executor/algo/ShortestPathExecutor.cpp b/src/graph/executor/algo/ShortestPathExecutor.cpp index 58d036ffc98..a31e331b148 100644 --- a/src/graph/executor/algo/ShortestPathExecutor.cpp +++ b/src/graph/executor/algo/ShortestPathExecutor.cpp @@ -44,6 +44,7 @@ size_t ShortestPathExecutor::checkInput(HashSet& startVids, HashSet& endVids) { auto iter = 
ectx_->getResult(pathNode_->inputVar()).iter();
   const auto& metaVidType = *(qctx()->rctx()->session()->space().spaceDesc.vid_type_ref());
   auto vidType = SchemaUtil::propTypeToValueType(metaVidType.get_type());
+  bool isZeroStep = pathNode_->stepRange().min() == 0;
   for (; iter->valid(); iter->next()) {
     auto start = iter->getColumn(0);
     auto end = iter->getColumn(1);
@@ -52,8 +53,10 @@ size_t ShortestPathExecutor::checkInput(HashSet& startVids, HashSet& endVids) {
               << ", end type: " << end.type() << ", space vid type: " << vidType;
       continue;
     }
-    if (start == end) {
-      // continue or return error
+
+    // When the minimum number of steps is 0 and the starting node and the destination node
+    // are the same, the shortest path between the two nodes is 0, so the pair is skipped
+    if (isZeroStep && start == end) {
       continue;
     }
     startVids.emplace(std::move(start));
diff --git a/src/graph/executor/query/TraverseExecutor.cpp b/src/graph/executor/query/TraverseExecutor.cpp
index 723e7f7988c..95ad17e27ed 100644
--- a/src/graph/executor/query/TraverseExecutor.cpp
+++ b/src/graph/executor/query/TraverseExecutor.cpp
@@ -478,6 +478,7 @@ folly::Future<Status> TraverseExecutor::buildPathMultiJobs(size_t minStep, size_
 std::vector<Row> TraverseExecutor::buildPath(const Value& initVertex, size_t minStep, size_t maxStep) {
+  memory::MemoryCheckGuard guard;
   auto vidIter = adjList_.find(initVertex);
   if (vidIter == adjList_.end()) {
     return std::vector<Row>();
diff --git a/src/graph/executor/query/ValueExecutor.cpp b/src/graph/executor/query/ValueExecutor.cpp
new file mode 100644
index 00000000000..04e695017ce
--- /dev/null
+++ b/src/graph/executor/query/ValueExecutor.cpp
@@ -0,0 +1,21 @@
+// Copyright (c) 2023 vesoft inc. All rights reserved.
+//
+// This source code is licensed under Apache 2.0 License.
+
+#include "graph/executor/query/ValueExecutor.h"
+
+#include "graph/context/Result.h"
+#include "graph/planner/plan/Query.h"
+#include "graph/service/GraphFlags.h"
+
+namespace nebula {
+namespace graph {
+
+folly::Future<Status> ValueExecutor::execute() {
+  SCOPED_TIMER(&execTime_);
+  auto value = asNode<ValueNode>(node())->value();
+  return finish(ResultBuilder().value(std::move(value)).build());
+}
+
+}  // namespace graph
+}  // namespace nebula
diff --git a/src/graph/executor/query/ValueExecutor.h b/src/graph/executor/query/ValueExecutor.h
new file mode 100644
index 00000000000..6529a69bf4c
--- /dev/null
+++ b/src/graph/executor/query/ValueExecutor.h
@@ -0,0 +1,22 @@
+// Copyright (c) 2023 vesoft inc. All rights reserved.
+//
+// This source code is licensed under Apache 2.0 License.
+
+#pragma once
+
+#include "graph/executor/Executor.h"
+
+// Outputs the constant value (the DataSet held by the Value plan node) as this executor's result
+namespace nebula {
+namespace graph {
+
+class ValueExecutor final : public Executor {
+ public:
+  ValueExecutor(const PlanNode *node, QueryContext *qctx) : Executor("ValueExecutor", node, qctx) {}
+
+  folly::Future<Status> execute() override;
+};
+
+}  // namespace graph
+}  // namespace nebula
diff --git a/src/graph/optimizer/CMakeLists.txt b/src/graph/optimizer/CMakeLists.txt
index 5f2d09260bc..c1eb3429046 100644
--- a/src/graph/optimizer/CMakeLists.txt
+++ b/src/graph/optimizer/CMakeLists.txt
@@ -66,6 +66,7 @@ nebula_add_library(
     rule/RemoveAppendVerticesBelowJoinRule.cpp
     rule/EmbedEdgeAllPredIntoTraverseRule.cpp
     rule/PushFilterThroughAppendVerticesRule.cpp
+    rule/EliminateFilterRule.cpp
 )
 
 nebula_add_subdirectory(test)
diff --git a/src/graph/optimizer/OptRule.cpp b/src/graph/optimizer/OptRule.cpp
index d9e46e04e8f..3905ebecc6c 100644
--- a/src/graph/optimizer/OptRule.cpp
+++ b/src/graph/optimizer/OptRule.cpp
@@ -213,6 +213,11 @@ RuleSet &RuleSet::DefaultRules() {
   return kDefaultRules;
 }
 
+RuleSet &RuleSet::QueryRules0() {
+  static RuleSet kQueryRules0("QueryRuleSet0");
+  return kQueryRules0;
+}
+
 RuleSet &RuleSet::QueryRules() {
   static RuleSet kQueryRules("QueryRuleSet");
   return kQueryRules;
@@ -231,6 +236,15 @@ RuleSet *RuleSet::addRule(const OptRule *rule) {
   return this;
 }
 
+std::string RuleSet::toString() const {
+  std::stringstream ss;
+  ss << "RuleSet: " << name_ << std::endl;
+  for (auto rule : rules_) {
+    ss << rule->toString() << std::endl;
+  }
+  return ss.str();
+}
+
 void RuleSet::merge(const RuleSet &ruleset) {
   for (auto rule : ruleset.rules()) {
     addRule(rule);
diff --git a/src/graph/optimizer/OptRule.h b/src/graph/optimizer/OptRule.h
index 1a0e0d65046..7de211c9b64 100644
--- a/src/graph/optimizer/OptRule.h
+++ b/src/graph/optimizer/OptRule.h
@@ -128,6 +128,7 @@ class OptRule {
 class RuleSet final {
  public:
   static RuleSet &DefaultRules();
+  static RuleSet &QueryRules0();
   static RuleSet &QueryRules();
 
   RuleSet *addRule(const OptRule *rule);
@@ -138,6 +139,8 @@ class RuleSet final {
     return rules_;
   }
 
+  std::string toString() const;
+
  private:
   explicit RuleSet(const std::string &name);
diff --git a/src/graph/optimizer/rule/EliminateFilterRule.cpp b/src/graph/optimizer/rule/EliminateFilterRule.cpp
new file mode 100644
index 00000000000..f38661bf893
--- /dev/null
+++ b/src/graph/optimizer/rule/EliminateFilterRule.cpp
@@ -0,0 +1,77 @@
+/* Copyright (c) 2023 vesoft inc. All rights reserved.
+ *
+ * This source code is licensed under Apache 2.0 License.
+ */
+
+#include "graph/optimizer/rule/EliminateFilterRule.h"
+
+#include "common/expression/Expression.h"
+#include "graph/optimizer/OptContext.h"
+#include "graph/optimizer/OptGroup.h"
+#include "graph/planner/plan/Logic.h"
+#include "graph/planner/plan/PlanNode.h"
+#include "graph/planner/plan/Query.h"
+
+using nebula::graph::Filter;
+using nebula::graph::QueryContext;
+using nebula::graph::StartNode;
+using nebula::graph::ValueNode;
+
+namespace nebula {
+namespace opt {
+
+std::unique_ptr<OptRule> EliminateFilterRule::kInstance =
+    std::unique_ptr<EliminateFilterRule>(new EliminateFilterRule());
+
+EliminateFilterRule::EliminateFilterRule() {
+  RuleSet::QueryRules0().addRule(this);
+}
+
+// TODO match Filter->(Any Node with real result)
+const Pattern& EliminateFilterRule::pattern() const {
+  static Pattern pattern = Pattern::create(graph::PlanNode::Kind::kFilter);
+  return pattern;
+}
+
+bool EliminateFilterRule::match(OptContext* octx, const MatchedResult& matched) const {
+  if (!OptRule::match(octx, matched)) {
+    return false;
+  }
+  const auto* filterNode = static_cast<const Filter*>(matched.node->node());
+  const auto* expr = filterNode->condition();
+  if (expr->kind() != Expression::Kind::kConstant) {
+    return false;
+  }
+  const auto* constant = static_cast<const ConstantExpression*>(expr);
+  auto ret = (constant->value().isImplicitBool() && constant->value().getBool() == false) ||
+             constant->value().isNull();
+  return ret;
+}
+
+StatusOr<OptRule::TransformResult> EliminateFilterRule::transform(
+    OptContext* octx, const MatchedResult& matched) const {
+  auto filterGroupNode = matched.node;
+  auto filter = static_cast<const Filter*>(filterGroupNode->node());
+
+  auto newValue = ValueNode::make(octx->qctx(), nullptr, DataSet(filter->colNames()));
+  newValue->setOutputVar(filter->outputVar());
+  auto newValueGroupNode = OptGroupNode::create(octx, newValue, filterGroupNode->group());
+
+  auto newStart = StartNode::make(octx->qctx());
+  auto newStartGroup = OptGroup::create(octx);
+  newStartGroup->makeGroupNode(newStart);
+
+  newValueGroupNode->dependsOn(newStartGroup);
+
+  TransformResult result;
+  result.eraseAll = true;
+  result.newGroupNodes.emplace_back(newValueGroupNode);
+  return result;
+}
+
+std::string EliminateFilterRule::toString() const {
+  return "EliminateFilterRule";
+}
+
+}  // namespace opt
+}  // namespace nebula
diff --git a/src/graph/optimizer/rule/EliminateFilterRule.h b/src/graph/optimizer/rule/EliminateFilterRule.h
new file mode 100644
index 00000000000..9d4dc31c705
--- /dev/null
+++ b/src/graph/optimizer/rule/EliminateFilterRule.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2023 vesoft inc. All rights reserved.
+ *
+ * This source code is licensed under Apache 2.0 License.
+ */
+
+#pragma once
+
+#include "graph/optimizer/OptRule.h"
+
+namespace nebula {
+namespace opt {
+
+// Eliminate useless [[Filter(false)]] node
+// Required conditions:
+//   1. Match the pattern
+//   2. Filter condition is false/null
+// Benefits:
+//   1. Delete unnecessary node
+//
+// Transformation:
+//  Before:
+//
+//  +------+--------+
+//  | Filter(false) |
+//  +------+--------+
+//         |
+//  +------+------+
+//  |    .....    |
| +// +------+------+ +// +// After: +// +// +------+------+ +// | Value | +// +------+------+ +// | Start | +// +------+------+ + +class EliminateFilterRule final : public OptRule { + public: + const Pattern &pattern() const override; + + bool match(OptContext *ctx, const MatchedResult &matched) const override; + + StatusOr transform(OptContext *ctx, const MatchedResult &matched) const override; + + std::string toString() const override; + + private: + EliminateFilterRule(); + + static std::unique_ptr kInstance; +}; + +} // namespace opt +} // namespace nebula diff --git a/src/graph/planner/match/ShortestPathPlanner.cpp b/src/graph/planner/match/ShortestPathPlanner.cpp index d93b7d2c15d..f03c5fce117 100644 --- a/src/graph/planner/match/ShortestPathPlanner.cpp +++ b/src/graph/planner/match/ShortestPathPlanner.cpp @@ -55,6 +55,10 @@ StatusOr ShortestPathPlanner::transform(WhereClauseContext* bindWhereCl SubPlan subplan; bool singleShortest = path_.pathType == Path::PathType::kSingleShortest; auto& nodeInfos = path_.nodeInfos; + if (nodeInfos.front().alias == nodeInfos.back().alias) { + return Status::SemanticError( + "The shortest path algorithm does not work when the start and end nodes are the same"); + } auto& edge = path_.edgeInfos.front(); std::vector colNames; colNames.emplace_back(nodeInfos.front().alias); diff --git a/src/graph/planner/plan/PlanNode.cpp b/src/graph/planner/plan/PlanNode.cpp index d9b9ebd8a7e..f4596498159 100644 --- a/src/graph/planner/plan/PlanNode.cpp +++ b/src/graph/planner/plan/PlanNode.cpp @@ -96,6 +96,8 @@ const char* PlanNode::toString(PlanNode::Kind kind) { return "Loop"; case Kind::kDedup: return "Dedup"; + case Kind::kValue: + return "Value"; case Kind::kPassThrough: return "PassThrough"; case Kind::kAssign: diff --git a/src/graph/planner/plan/PlanNode.h b/src/graph/planner/plan/PlanNode.h index eb1991f1aa5..f327b69b9d5 100644 --- a/src/graph/planner/plan/PlanNode.h +++ b/src/graph/planner/plan/PlanNode.h @@ -45,6 +45,8 @@ class PlanNode { kScanVertices, kScanEdges, kFulltextIndexScan, + // direct value + kValue, // ------------------ kFilter, diff --git a/src/graph/planner/plan/Query.h b/src/graph/planner/plan/Query.h index 02d95f95730..ded3d16f2a6 100644 --- a/src/graph/planner/plan/Query.h +++ b/src/graph/planner/plan/Query.h @@ -6,6 +6,8 @@ #ifndef GRAPH_PLANNER_PLAN_QUERY_H_ #define GRAPH_PLANNER_PLAN_QUERY_H_ +#include + #include "common/expression/AggregateExpression.h" #include "graph/context/QueryContext.h" #include "graph/planner/plan/PlanNode.h" @@ -1713,7 +1715,15 @@ class Traverse final : public GetNeighbors { const std::string& edgeAlias() const { DCHECK(!this->colNames().empty()); - return this->colNames().back(); + const auto& colNames = this->colNames(); + auto n = colNames.size(); + + if (!genPath_) { + return colNames[n - 1]; + } + // When a path needs to be generated, Traverse outputs one more column + DCHECK_GT(n, 2); + return colNames[n - 2]; } void setStepRange(const MatchStepRange& range) { @@ -2042,6 +2052,28 @@ class PatternApply : public BinaryInputNode { bool isAntiPred_{false}; }; +class ValueNode : public SingleInputNode { + public: + // Value with empty result + static ValueNode* make(QueryContext* qctx, PlanNode* dep, DataSet value) { + return qctx->objPool()->makeAndAdd(qctx, dep, std::move(value)); + } + + Value value() const { + return value_; + } + + private: + friend ObjectPool; + ValueNode(QueryContext* qctx, PlanNode* dep, DataSet value) + : SingleInputNode(qctx, Kind::kValue, dep), value_(std::move(value)) { + 
setColNames(value_.colNames); + } + + private: + DataSet value_; +}; + } // namespace graph } // namespace nebula #endif // GRAPH_PLANNER_PLAN_QUERY_H_ diff --git a/src/graph/service/QueryEngine.cpp b/src/graph/service/QueryEngine.cpp index f7045f7fd50..6e40d4ac499 100644 --- a/src/graph/service/QueryEngine.cpp +++ b/src/graph/service/QueryEngine.cpp @@ -37,6 +37,7 @@ Status QueryEngine::init(std::shared_ptr ioExecutor // Set default optimizer rules std::vector rulesets{&opt::RuleSet::DefaultRules()}; if (FLAGS_enable_optimizer) { + rulesets.emplace_back(&opt::RuleSet::QueryRules0()); rulesets.emplace_back(&opt::RuleSet::QueryRules()); } optimizer_ = std::make_unique(rulesets); diff --git a/src/graph/util/ExpressionUtils.cpp b/src/graph/util/ExpressionUtils.cpp index 456ec403441..da38cf542ee 100644 --- a/src/graph/util/ExpressionUtils.cpp +++ b/src/graph/util/ExpressionUtils.cpp @@ -16,6 +16,7 @@ #include "graph/context/QueryExpressionContext.h" #include "graph/visitor/FoldConstantExprVisitor.h" #include "graph/visitor/PropertyTrackerVisitor.h" +#include "graph/visitor/RewriteVisitor.h" DEFINE_int32(max_expression_depth, 512, "Max depth of expression tree."); @@ -1712,5 +1713,31 @@ bool ExpressionUtils::isOneStepEdgeProp(const std::string &edgeAlias, const Expr return graph::RewriteVisitor::transform(expr, matcher, rewriter); } +/*static*/ Expression *ExpressionUtils::rewriteAlwaysNullLabelTagProperty( + ObjectPool *pool, meta::SchemaManager *schemaMan, GraphSpaceID spaceId, Expression *expr) { + auto matcher = [spaceId, schemaMan](const Expression *e) -> bool { + if (e->kind() == Expression::Kind::kLabelTagProperty) { + auto *ltpExpr = static_cast(e); + auto tagSchema = schemaMan->getTagSchema(spaceId, ltpExpr->sym()); + if (tagSchema == nullptr) { + // no such tag + return true; + } + auto found = tagSchema->getFieldIndex(ltpExpr->prop()); + if (found < 0) { + // no such property + return true; + } + } + return false; + }; + auto rewriter = [pool](const Expression *e) -> Expression * { + DCHECK_EQ(e->kind(), Expression::Kind::kLabelTagProperty); + UNUSED(e); + return ConstantExpression::make(pool); + }; + return graph::RewriteVisitor::transform(expr, matcher, rewriter); +} + } // namespace graph } // namespace nebula diff --git a/src/graph/util/ExpressionUtils.h b/src/graph/util/ExpressionUtils.h index 3ec993b552b..c7151b04d5d 100644 --- a/src/graph/util/ExpressionUtils.h +++ b/src/graph/util/ExpressionUtils.h @@ -14,6 +14,7 @@ #include "common/expression/PropertyExpression.h" #include "common/expression/TypeCastingExpression.h" #include "common/expression/UnaryExpression.h" +#include "common/thrift/ThriftTypes.h" #include "graph/context/ast/CypherAstContext.h" #include "graph/visitor/EvaluableExprVisitor.h" #include "graph/visitor/FindVisitor.h" @@ -265,6 +266,12 @@ class ExpressionUtils { static Expression* rewriteVertexPropertyFilter(ObjectPool* pool, const std::string& node, Expression* expr); + + // label.not_exists_tag.prop => null + static Expression* rewriteAlwaysNullLabelTagProperty(ObjectPool* pool, + meta::SchemaManager* schemaMan, + GraphSpaceID spaceId, + Expression* expr); }; } // namespace graph diff --git a/src/graph/util/FTIndexUtils.cpp b/src/graph/util/FTIndexUtils.cpp index 84e5a3d60b8..24d01a10ec2 100644 --- a/src/graph/util/FTIndexUtils.cpp +++ b/src/graph/util/FTIndexUtils.cpp @@ -41,6 +41,5 @@ StatusOr<::nebula::plugin::ESAdapter> FTIndexUtils::getESAdapter(meta::MetaClien return ::nebula::plugin::ESAdapter(std::move(clients)); } - } // namespace graph } // 
namespace nebula diff --git a/src/graph/validator/MatchValidator.cpp b/src/graph/validator/MatchValidator.cpp index 7071cd85219..6062c184495 100644 --- a/src/graph/validator/MatchValidator.cpp +++ b/src/graph/validator/MatchValidator.cpp @@ -367,6 +367,11 @@ Status MatchValidator::validateFilter(const Expression *filter, // rewrite Attribute to LabelTagProperty newFilter = ExpressionUtils::rewriteAttr2LabelTagProp(transformRes.value(), whereClauseCtx.aliasesAvailable); + newFilter = ExpressionUtils::rewriteAlwaysNullLabelTagProperty( + qctx_->objPool(), qctx_->schemaMng(), space_.id, newFilter); + auto result = ExpressionUtils::foldConstantExpr(newFilter); + NG_RETURN_IF_ERROR(result); + newFilter = result.value(); newFilter = ExpressionUtils::rewriteEdgePropFunc2LabelAttribute(newFilter, whereClauseCtx.aliasesAvailable); diff --git a/src/graph/validator/MutateValidator.cpp b/src/graph/validator/MutateValidator.cpp index 899797cc25b..9eb2b6413ed 100644 --- a/src/graph/validator/MutateValidator.cpp +++ b/src/graph/validator/MutateValidator.cpp @@ -326,6 +326,10 @@ Status DeleteVerticesValidator::validateImpl() { } else { auto vIds = sentence->vertices()->vidList(); for (auto vId : vIds) { + if (!ExpressionUtils::isEvaluableExpr(vId, qctx_)) { + return Status::SemanticError("`%s' is not an evaluable expression.", + vId->toString().c_str()); + } auto idStatus = SchemaUtil::toVertexID(vId, vidType_); NG_RETURN_IF_ERROR(idStatus); vertices_.emplace_back(std::move(idStatus).value()); @@ -464,6 +468,10 @@ Status DeleteTagsValidator::validateImpl() { } else { auto vIds = sentence->vertices()->vidList(); for (auto vId : vIds) { + if (!ExpressionUtils::isEvaluableExpr(vId, qctx_)) { + return Status::SemanticError("`%s' is not an evaluable expression.", + vId->toString().c_str()); + } auto idStatus = SchemaUtil::toVertexID(vId, vidType_); NG_RETURN_IF_ERROR(idStatus); vertices_.emplace_back(std::move(idStatus).value()); @@ -551,6 +559,14 @@ Status DeleteEdgesValidator::buildEdgeKeyRef(const std::vector &edgeK for (auto &edgeKey : edgeKeys) { Row row; storage::cpp2::EdgeKey key; + if (!ExpressionUtils::isEvaluableExpr(edgeKey->srcid(), qctx_)) { + return Status::SemanticError("`%s' is not an evaluable expression.", + edgeKey->srcid()->toString().c_str()); + } + if (!ExpressionUtils::isEvaluableExpr(edgeKey->dstid(), qctx_)) { + return Status::SemanticError("`%s' is not an evaluable expression.", + edgeKey->dstid()->toString().c_str()); + } auto srcIdStatus = SchemaUtil::toVertexID(edgeKey->srcid(), vidType_); NG_RETURN_IF_ERROR(srcIdStatus); auto dstIdStatus = SchemaUtil::toVertexID(edgeKey->dstid(), vidType_); diff --git a/src/kvstore/raftex/Host.cpp b/src/kvstore/raftex/Host.cpp index 826bb0e4e2c..22bbad341c4 100644 --- a/src/kvstore/raftex/Host.cpp +++ b/src/kvstore/raftex/Host.cpp @@ -21,6 +21,9 @@ DEFINE_uint32(max_appendlog_batch_size, "The max number of logs in each appendLog request batch"); DEFINE_uint32(max_outstanding_requests, 1024, "The max number of outstanding appendLog requests"); DEFINE_int32(raft_rpc_timeout_ms, 1000, "rpc timeout for raft client"); +DEFINE_int32(pause_host_time_factor, + 4, + "The factor of pause host time based on raft heartbeat interval"); DECLARE_bool(trace_raft); DECLARE_uint32(raft_heartbeat_interval_secs); @@ -60,11 +63,22 @@ nebula::cpp2::ErrorCode Host::canAppendLog() const { return nebula::cpp2::ErrorCode::SUCCEEDED; } +nebula::cpp2::ErrorCode Host::canSendHBOrVote() const { + CHECK(!lock_.try_lock()); + if (stopped_) { + VLOG(2) << idStr_ << "The host is 
stopped, just return"; + return nebula::cpp2::ErrorCode::E_RAFT_HOST_STOPPED; + } + + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + folly::Future Host::askForVote(const cpp2::AskForVoteRequest& req, folly::EventBase* eb) { { std::lock_guard g(lock_); - if (stopped_) { + auto res = canSendHBOrVote(); + if (res != nebula::cpp2::ErrorCode::SUCCEEDED) { VLOG(3) << idStr_ << "The Host is not in a proper status, do not send"; cpp2::AskForVoteResponse resp; resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_HOST_STOPPED; @@ -410,11 +424,39 @@ folly::Future Host::sendHeartbeat( pro = std::move(promise)](folly::Try&& t) mutable { VLOG(4) << self->idStr_ << "heartbeat call got response"; if (t.hasException()) { + using TransportException = apache::thrift::transport::TTransportException; + auto exWrapper = std::move(t).exception(); + auto exception = exWrapper.get_exception(); + VLOG(2) << self->idStr_ << "Heartbeat: " << exception->what(); + // If we keeps receiving NOT_OPEN exception after some HB intervals, + // we can assume that the peer is down so we mark paused_ as true + if (exception && exception->getType() == TransportException::NOT_OPEN) { + if (!self->paused_) { + auto now = time::WallClock::fastNowInMilliSec(); + if (now - self->lastHeartbeatTime_ >= + FLAGS_pause_host_time_factor * FLAGS_raft_heartbeat_interval_secs * 1000) { + LOG(WARNING) << self->idStr_ + << "Pasue this host because long time no heartbeat response"; + std::lock_guard g(self->lock_); + self->paused_ = true; + } + } + } cpp2::HeartbeatResponse resp; resp.error_code_ref() = nebula::cpp2::ErrorCode::E_RAFT_RPC_EXCEPTION; pro.setValue(std::move(resp)); return; } else { + auto& resp = t.value(); + if (resp.error_code_ref() == nebula::cpp2::ErrorCode::SUCCEEDED) { + std::lock_guard g(self->lock_); + // If the peer is back online and ready, we set paused_ as false, + // the leader can then resume sending appendLog request to this peer + if (self->paused_) { + self->paused_ = false; + } + } + self->setLastHeartbeatTime(time::WallClock::fastNowInMilliSec()); pro.setValue(std::move(t.value())); } }); @@ -427,7 +469,7 @@ folly::Future Host::sendHeartbeatRequest( { std::lock_guard g(lock_); - auto res = canAppendLog(); + auto res = canSendHBOrVote(); if (res != nebula::cpp2::ErrorCode::SUCCEEDED) { VLOG(3) << idStr_ << "The Host is not in a proper status, do not send"; cpp2::HeartbeatResponse resp; @@ -459,8 +501,8 @@ std::shared_ptr Host::getPendingReqIfAny(std::shared_ptr // Check if there are any pending request to send if (self->noRequest()) { - self->noMoreRequestCV_.notify_all(); self->requestOnGoing_ = false; + self->noMoreRequestCV_.notify_all(); return nullptr; } diff --git a/src/kvstore/raftex/Host.h b/src/kvstore/raftex/Host.h index 0f5f481849a..49bbf3d9ee8 100644 --- a/src/kvstore/raftex/Host.h +++ b/src/kvstore/raftex/Host.h @@ -168,6 +168,13 @@ class Host final : public std::enable_shared_from_this { */ nebula::cpp2::ErrorCode canAppendLog() const; + /** + * @brief Whether Host can send HB or AskForVote request to the peer + * + * @return nebula::cpp2::ErrorCode + */ + nebula::cpp2::ErrorCode canSendHBOrVote() const; + /** * @brief Send append log rpc * @@ -244,6 +251,12 @@ class Host final : public std::enable_shared_from_this { mutable std::mutex lock_; + // If stopped_ is true, we will not send any request to the peer; + // If stopped_ is false: + // 1. no mater whether paused_ is true or not, we can send HB request or AskForVote request; + // 2. 
Only if paused_ is false, we can send appendlog request, of course, including HB + // request and AskForRequest request + // See canAppendLog() and canSendHBOrVote() bool paused_{false}; bool stopped_{false}; diff --git a/src/kvstore/raftex/RaftPart.cpp b/src/kvstore/raftex/RaftPart.cpp index a48505ffae1..a4fb54f053e 100644 --- a/src/kvstore/raftex/RaftPart.cpp +++ b/src/kvstore/raftex/RaftPart.cpp @@ -2097,10 +2097,6 @@ void RaftPart::sendHeartbeat() { if (!hosts[resp.first]->isLearner() && resp.second.get_error_code() == nebula::cpp2::ErrorCode::SUCCEEDED) { ++numSucceeded; - // only metad 0 space 0 part need this state now. - if (spaceId_ == kDefaultSpaceId) { - hosts[resp.first]->setLastHeartbeatTime(time::WallClock::fastNowInMilliSec()); - } } highestTerm = std::max(highestTerm, resp.second.get_current_term()); } diff --git a/src/meta/processors/session/SessionManagerProcessor.cpp b/src/meta/processors/session/SessionManagerProcessor.cpp index e33a5341b60..e1860e4b24b 100644 --- a/src/meta/processors/session/SessionManagerProcessor.cpp +++ b/src/meta/processors/session/SessionManagerProcessor.cpp @@ -133,10 +133,8 @@ void ListSessionsProcessor::process(const cpp2::ListSessionsReq&) { sessions.emplace_back(std::move(session)); iter->next(); } + LOG(INFO) << "resp session size: " << sessions.size(); resp_.sessions_ref() = std::move(sessions); - for (auto& session : resp_.get_sessions()) { - LOG(INFO) << "resp list session: " << session.get_session_id(); - } handleErrorCode(nebula::cpp2::ErrorCode::SUCCEEDED); onFinished(); } diff --git a/src/storage/CompactionFilter.h b/src/storage/CompactionFilter.h index 2c47e1576a9..d69979c0736 100644 --- a/src/storage/CompactionFilter.h +++ b/src/storage/CompactionFilter.h @@ -17,7 +17,7 @@ #include "storage/StorageFlags.h" DEFINE_int32(min_level_for_custom_filter, - 4, + 0, "Minimal level compaction which will go through custom compaction filter"); namespace nebula { diff --git a/tests/tck/features/delete/DeleteEdge.IntVid.feature b/tests/tck/features/delete/DeleteEdge.IntVid.feature index f676ea0ef85..b3687785e61 100644 --- a/tests/tck/features/delete/DeleteEdge.IntVid.feature +++ b/tests/tck/features/delete/DeleteEdge.IntVid.feature @@ -224,6 +224,12 @@ Feature: Delete int vid of edge | like._dst | | "Tony Parker" | | "Tim Duncan" | + When executing query: + """ + GO FROM hash("Boris Diaw") OVER like YIELD edge as e + | DELETE EDGE like src($-.e)->dst($-.e) + """ + Then a SemanticError should be raised at runtime: `src($-.e)' is not an evaluable expression. When executing query: """ GO FROM hash("Boris Diaw") OVER like diff --git a/tests/tck/features/delete/DeleteEdge.feature b/tests/tck/features/delete/DeleteEdge.feature index adb9a5c755c..acc4b6cff4e 100644 --- a/tests/tck/features/delete/DeleteEdge.feature +++ b/tests/tck/features/delete/DeleteEdge.feature @@ -224,6 +224,12 @@ Feature: Delete string vid of edge | like._dst | | "Tony Parker" | | "Tim Duncan" | + When executing query: + """ + GO FROM "Boris Diaw" OVER like YIELD edge as e + | DELETE EDGE like src($-.e)->dst($-.e) + """ + Then a SemanticError should be raised at runtime: `src($-.e)' is not an evaluable expression. 
When executing query: """ GO FROM "Boris Diaw" OVER like diff --git a/tests/tck/features/delete/DeleteTag.IntVid.feature b/tests/tck/features/delete/DeleteTag.IntVid.feature index 11b106c7db9..f7327a49932 100644 --- a/tests/tck/features/delete/DeleteTag.IntVid.feature +++ b/tests/tck/features/delete/DeleteTag.IntVid.feature @@ -247,6 +247,11 @@ Feature: Delete int vid of tag | team.name | | "Spurs" | # delete one tag + When executing query: + """ + GO FROM hash("Tim Duncan") OVER serve YIELD edge as e | DELETE TAG team FROM src($-.e) + """ + Then a SemanticError should be raised at runtime: `src($-.e)' is not an evaluable expression. When executing query: """ GO FROM hash("Tim Duncan") OVER serve YIELD serve._dst as id | DELETE TAG team FROM $-.id diff --git a/tests/tck/features/delete/DeleteTag.feature b/tests/tck/features/delete/DeleteTag.feature index 3afecefa036..9bea868cb09 100644 --- a/tests/tck/features/delete/DeleteTag.feature +++ b/tests/tck/features/delete/DeleteTag.feature @@ -247,6 +247,11 @@ Feature: Delete string vid of tag | team.name | | "Spurs" | # delete one tag + When executing query: + """ + GO FROM "Tim Duncan" OVER serve YIELD edge as e | DELETE TAG team FROM src($-.e) + """ + Then a SemanticError should be raised at runtime: `src($-.e)' is not an evaluable expression. When executing query: """ GO FROM "Tim Duncan" OVER serve YIELD serve._dst as id | DELETE TAG team FROM $-.id diff --git a/tests/tck/features/delete/DeleteVertex.IntVid.feature b/tests/tck/features/delete/DeleteVertex.IntVid.feature index 09a55727893..19cec5b2d80 100644 --- a/tests/tck/features/delete/DeleteVertex.IntVid.feature +++ b/tests/tck/features/delete/DeleteVertex.IntVid.feature @@ -230,6 +230,11 @@ Feature: Delete int vid of vertex | like._dst | | "Tony Parker" | | "Manu Ginobili" | + When executing query: + """ + GO FROM hash("Boris Diaw") OVER like YIELD edge as e | DELETE VERTEX src($-.e) WITH EDGE + """ + Then a SemanticError should be raised at runtime: `src($-.e)' is not an evaluable expression. When executing query: """ GO FROM hash("Boris Diaw") OVER like YIELD like._dst as id | DELETE VERTEX $-.id WITH EDGE diff --git a/tests/tck/features/delete/DeleteVertex.feature b/tests/tck/features/delete/DeleteVertex.feature index 8cc9864d39d..706e4ac6297 100644 --- a/tests/tck/features/delete/DeleteVertex.feature +++ b/tests/tck/features/delete/DeleteVertex.feature @@ -231,6 +231,11 @@ Feature: Delete string vid of vertex | like._dst | | "Tony Parker" | | "Manu Ginobili" | + When executing query: + """ + GO FROM "Boris Diaw" OVER like YIELD edge as e | DELETE VERTEX src($-.e) WITH EDGE + """ + Then a SemanticError should be raised at runtime: `src($-.e)' is not an evaluable expression. When executing query: """ GO FROM "Boris Diaw" OVER like YIELD like._dst as id | DELETE VERTEX $-.id WITH EDGE diff --git a/tests/tck/features/delete/DeleteVertexWithoutEdge.feature b/tests/tck/features/delete/DeleteVertexWithoutEdge.feature index 74494df5299..94221fa55c5 100644 --- a/tests/tck/features/delete/DeleteVertexWithoutEdge.feature +++ b/tests/tck/features/delete/DeleteVertexWithoutEdge.feature @@ -88,6 +88,11 @@ Feature: delete vertex without edge | a | id | | 2 | 1 | | 3 | 1 | + When executing query: + """ + GO FROM 1 OVER e YIELD edge as e1 | DELETE VERTEX dst($-.e1) + """ + Then a SemanticError should be raised at runtime: `dst($-.e1)' is not an evaluable expression. 
When executing query: """ DELETE VERTEX 1; diff --git a/tests/tck/features/match/AllShortestPaths.feature b/tests/tck/features/match/AllShortestPaths.feature index 563d90a371d..f777312f796 100644 --- a/tests/tck/features/match/AllShortestPaths.feature +++ b/tests/tck/features/match/AllShortestPaths.feature @@ -898,3 +898,73 @@ Feature: allShortestPaths | 11 | Argument | | | | 14 | Project | 13 | | | 13 | Argument | | | + + Scenario: allShortestPaths for same start and end node + When executing query: + """ + MATCH (a:player{name:'Yao Ming'}) + MATCH p = allShortestPaths((a)-[:like*1..3]-(a)) + RETURN p + """ + Then a SemanticError should be raised at runtime: The shortest path algorithm does not work when the start and end nodes are the same + When executing query: + """ + MATCH p = allShortestPaths((a:player{name:'Yao Ming'})-[:like*1..3]-(a)) + RETURN p + """ + Then a SemanticError should be raised at runtime: The shortest path algorithm does not work when the start and end nodes are the same + When executing query: + """ + MATCH (a:player{name:'Yao Ming'}), (b:player{name:'Yao Ming'}) + MATCH p = allShortestPaths((a)-[:like*0..3]-(b)) + RETURN p + """ + Then the result should be, in any order, with relax comparison: + | p | + When executing query: + """ + MATCH p = allShortestPaths((a)-[:like*0..3]-(b)) + WHERE id(a) == 'Yao Ming' AND id(b) == 'Yao Ming' + RETURN p + """ + Then the result should be, in any order, with relax comparison: + | p | + When executing query: + """ + MATCH (a:player{name:'Yao Ming'}), (b:player{name:'Yao Ming'}) + MATCH p = allShortestPaths((a)-[:like*1..3]-(b)) + RETURN p + """ + Then the result should be, in any order, with relax comparison: + | p | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + When executing query: + """ + MATCH (a:player{name:'Yao Ming'}) + MATCH p = allShortestPaths((a)-[:like*1..3]-(b:player{name:'Yao Ming'})) + RETURN p + """ + Then the result should be, in any order, with relax comparison: + | p | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + When executing query: + """ + MATCH p = allShortestPaths((a:player{name:'Yao Ming'})-[:like*1..3]-(b:player{name:'Yao Ming'})) + RETURN p + """ + Then the result should be, in any order, with relax comparison: + | p | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + When executing query: + """ + MATCH p = allShortestPaths((a)-[:like*1..3]-(b)) + WHERE 
id(a) == 'Yao Ming' AND id(b) == 'Yao Ming' + RETURN p + """ + Then the result should be, in any order, with relax comparison: + | p | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | diff --git a/tests/tck/features/match/SingleShorestPath.feature b/tests/tck/features/match/SingleShorestPath.feature index 78ad83c1662..abf9f8d96ac 100644 --- a/tests/tck/features/match/SingleShorestPath.feature +++ b/tests/tck/features/match/SingleShorestPath.feature @@ -746,3 +746,69 @@ Feature: single shortestPath | <("JaVale McGee":player{age:31,name:"JaVale McGee"})-[:serve@0{end_year:2018,start_year:2016}]->("Warriors":team{name:"Warriors"})<-[:serve@0{end_year:2009,start_year:2007}]-("Marco Belinelli":player{age:32,name:"Marco Belinelli"})-[:like@0{likeness:50}]->("Tony Parker":player{age:36,name:"Tony Parker"})> | | <("LeBron James":player{age:34,name:"LeBron James"})<-[:like@0{likeness:99}]-("Dejounte Murray":player{age:29,name:"Dejounte Murray"})-[:like@0{likeness:99}]->("Tony Parker":player{age:36,name:"Tony Parker"})> | | <("Kings":team{name:"Kings"})<-[:serve@0{end_year:2016,start_year:2015}]-("Marco Belinelli":player{age:32,name:"Marco Belinelli"})-[:like@0{likeness:50}]->("Tony Parker":player{age:36,name:"Tony Parker"})> | + + Scenario: shortestPath for same start and end node + When executing query: + """ + MATCH (a:player{name:'Yao Ming'}) + MATCH p = shortestPath((a)-[:like*1..3]-(a)) + RETURN p + """ + Then a SemanticError should be raised at runtime: The shortest path algorithm does not work when the start and end nodes are the same + When executing query: + """ + MATCH p = shortestPath((a:player{name:'Yao Ming'})-[:like*1..3]-(a)) + RETURN p + """ + Then a SemanticError should be raised at runtime: The shortest path algorithm does not work when the start and end nodes are the same + When executing query: + """ + MATCH (a:player{name:'Yao Ming'}), (b:player{name:'Yao Ming'}) + MATCH p = shortestPath((a)-[:like*0..3]-(b)) + RETURN p + """ + Then the result should be, in any order, with relax comparison: + | p | + When executing query: + """ + MATCH p = shortestPath((a)-[:like*0..3]-(b)) + WHERE id(a) == 'Yao Ming' AND id(b) == 'Yao Ming' + RETURN p + """ + Then the result should be, in any order, with relax comparison: + | p | + When executing query: + """ + MATCH (a:player{name:'Yao Ming'}), (b:player{name:'Yao Ming'}) + MATCH p = shortestPath((a)-[:like*1..3]-(b)) + RETURN a, b + """ + Then the result should be, in any order, with relax comparison: + | a | b | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + When executing query: + """ + MATCH (a:player{name:'Yao Ming'}) + MATCH p = shortestPath((a)-[:like*1..3]-(b:player{name:'Yao Ming'})) + RETURN a,b + """ + Then the result should be, in any order, with relax comparison: + | a | b | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + When executing query: + """ + MATCH p = shortestPath((a:player{name:'Yao Ming'})-[:like*1..3]-(b:player{name:'Yao Ming'})) + RETURN a,b + """ + Then the result should be, in any order, with relax comparison: + | a | b | 
+ | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + When executing query: + """ + MATCH p = shortestPath((a)-[:like*1..3]-(b)) + WHERE id(a) == 'Yao Ming' AND id(b) == 'Yao Ming' + RETURN a,b + """ + Then the result should be, in any order, with relax comparison: + | a | b | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | diff --git a/tests/tck/features/optimizer/ElimintateInvalidProp.feature b/tests/tck/features/optimizer/ElimintateInvalidProp.feature new file mode 100644 index 00000000000..96656a74a2c --- /dev/null +++ b/tests/tck/features/optimizer/ElimintateInvalidProp.feature @@ -0,0 +1,37 @@ +# Copyright (c) 2023 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License. +Feature: Eliminate invalid property filter + Examples: + | space_name | + | nba | + | nba_int_vid | + + Background: + Given a graph with space named "" + + Scenario: Elimintate Not Exists Tag + When profiling query: + """ + MATCH (v:player) WHERE v.not_exists_tag.name == 'Tim Duncan' RETURN v.player.age AS age + """ + Then the result should be, in any order: + | age | + And the execution plan should be: + | id | name | dependencies | operator info | + | 0 | Project | 1 | | + | 1 | Value | 2 | | + | 2 | Start | | | + + Scenario: Elimintate Not Exists Property + When profiling query: + """ + MATCH (v:player) WHERE v.player.not_exists_prop == 'Tim Duncan' RETURN v.player.age AS age + """ + Then the result should be, in any order: + | age | + And the execution plan should be: + | id | name | dependencies | operator info | + | 0 | Project | 1 | | + | 1 | Value | 2 | | + | 2 | Start | | | diff --git a/tests/tck/features/optimizer/EmbedEdgeAllPredIntoTraverseRule.feature b/tests/tck/features/optimizer/EmbedEdgeAllPredIntoTraverseRule.feature index e4e5c1abc09..b443f587649 100644 --- a/tests/tck/features/optimizer/EmbedEdgeAllPredIntoTraverseRule.feature +++ b/tests/tck/features/optimizer/EmbedEdgeAllPredIntoTraverseRule.feature @@ -355,3 +355,355 @@ Feature: Embed edge all predicate into Traverse | 39 | Traverse | 17 | | | | 17 | Traverse | 16 | | | | 16 | Argument | | | | + + Scenario: Embed edge all predicate into Traverse with path variable + When profiling query: + """ + MATCH p= (v:player)-[e:like*1]->(n) + WHERE all(i in e where i.likeness>90) + RETURN [i in e | i.likeness] AS likeness, n.player.age AS nage + """ + Then the result should be, in any order: + | likeness | nage | + | [99] | 33 | + | [99] | 31 | + | [99] | 29 | + | [99] | 30 | + | [99] | 25 | + | [99] | 34 | + | [99] | 41 | + | [99] | 32 | + | [99] | 30 | + | [99] | 42 | + | [99] | 36 | + | [95] | 41 | + | [95] | 42 | + | [100] | 31 | + | [95] | 30 | + | [95] | 41 | + | [95] | 36 | + | [100] | 43 | + | [99] | 34 | + | [99] | 38 | + And the execution plan should be: + | id | name | dependencies | profiling data | operator info | + | 7 | Project | 11 | | | + | 11 | AppendVertices | 13 | | | + | 13 | Traverse | 1 | | {"filter": "(like.likeness>90)"} | + | 1 | IndexScan | 2 | | | + | 2 | Start | | | | + When profiling query: + """ + MATCH p= (v:player)-[e:like*1]->(n) + WHERE all(i in e where i.likeness>90 or i.likeness<0) + RETURN [i in e | i.likeness] AS likeness, n.player.age AS nage + """ + Then the result should be, in any order: + | likeness | nage | + | [99] | 33 | + | [99] | 31 | + | [99] | 29 | + | [99] | 30 | + | [99] | 25 | + | [99] | 34 | + | [99] | 41 | + | [99] | 32 | + | [99] | 30 | + | [99] | 42 | + 
| [99] | 36 | + | [95] | 41 | + | [95] | 42 | + | [100] | 31 | + | [95] | 30 | + | [95] | 41 | + | [95] | 36 | + | [100] | 43 | + | [99] | 34 | + | [99] | 38 | + | [-1] | 43 | + | [-1] | 33 | + And the execution plan should be: + | id | name | dependencies | profiling data | operator info | + | 7 | Project | 11 | | | + | 11 | AppendVertices | 13 | | | + | 13 | Traverse | 1 | | {"filter": "((like.likeness>90) OR (like.likeness<0))"} | + | 1 | IndexScan | 2 | | | + | 2 | Start | | | | + When profiling query: + """ + MATCH p= (v:player)-[e:like*1]->(n) + WHERE all(i in e where i.likeness>90 and v.player.name <> "x") + RETURN [i in e | i.likeness] AS likeness, n.player.age AS nage + """ + Then the result should be, in any order: + | likeness | nage | + | [99] | 33 | + | [99] | 31 | + | [99] | 29 | + | [99] | 30 | + | [99] | 25 | + | [99] | 34 | + | [99] | 41 | + | [99] | 32 | + | [99] | 30 | + | [99] | 42 | + | [99] | 36 | + | [95] | 41 | + | [95] | 42 | + | [100] | 31 | + | [95] | 30 | + | [95] | 41 | + | [95] | 36 | + | [100] | 43 | + | [99] | 34 | + | [99] | 38 | + And the execution plan should be: + | id | name | dependencies | profiling data | operator info | + | 7 | Project | 11 | | | + | 11 | Filter | 11 | | {"condition": "all(__VAR_0 IN $-.e WHERE (($__VAR_0.likeness>90) AND ($-.v.player.name!=\"x\")))"} | + | 11 | AppendVertices | 13 | | | + | 13 | Traverse | 1 | | | + | 1 | IndexScan | 2 | | | + | 2 | Start | | | | + When profiling query: + """ + MATCH p= (v:player)-[e:like*2]->(n) + WHERE all(i in e where i.likeness>90) + RETURN [i in e | i.likeness] AS likeness, n.player.age AS nage + """ + Then the result should be, in any order: + | likeness | nage | + | [99, 100] | 43 | + | [99, 95] | 41 | + | [99, 95] | 36 | + | [99, 95] | 41 | + | [99, 95] | 42 | + | [95, 95] | 41 | + | [95, 95] | 36 | + | [95, 95] | 41 | + | [95, 95] | 42 | + | [99, 99] | 38 | + | [99, 99] | 34 | + And the execution plan should be: + | id | name | dependencies | profiling data | operator info | + | 7 | Project | 11 | | | + | 11 | AppendVertices | 13 | | | + | 13 | Traverse | 1 | | {"filter": "(like.likeness>90)"} | + | 1 | IndexScan | 2 | | | + | 2 | Start | | | | + When profiling query: + """ + MATCH p= (v:player)-[e:like*2..4]->(n) + WHERE all(i in e where i.likeness>90) + RETURN [i in e | i.likeness] AS likeness, n.player.age AS nage + """ + Then the result should be, in any order: + | likeness | nage | + | [99, 100] | 43 | + | [99, 95] | 41 | + | [99, 95] | 36 | + | [99, 95] | 41 | + | [99, 95] | 42 | + | [99, 95, 95] | 41 | + | [99, 95, 95] | 42 | + | [99, 95, 95] | 41 | + | [99, 95, 95] | 36 | + | [99, 95, 95, 95] | 41 | + | [99, 95, 95, 95] | 41 | + | [95, 95] | 41 | + | [95, 95] | 36 | + | [95, 95, 95] | 41 | + | [95, 95] | 41 | + | [95, 95] | 42 | + | [95, 95, 95] | 41 | + | [99, 99] | 38 | + | [99, 99] | 34 | + And the execution plan should be: + | id | name | dependencies | profiling data | operator info | + | 7 | Project | 11 | | | + | 11 | AppendVertices | 13 | | | + | 13 | Traverse | 1 | | {"filter": "(like.likeness>90)"} | + | 1 | IndexScan | 2 | | | + | 2 | Start | | | | + When profiling query: + """ + MATCH p= (v:player)-[e:like*0..5]->(n) + WHERE all(i in e where i.likeness>90) AND size(e)>0 AND (n.player.age>0) == true + RETURN [i in e | i.likeness] AS likeness, n.player.age AS nage + """ + Then the result should be, in any order: + | likeness | nage | + | [99, 99] | 34 | + | [99] | 38 | + | [99, 99] | 38 | + | [99] | 34 | + | [100] | 43 | + | [95, 95, 95] | 41 | + | [95, 95] | 42 | + | 
[95, 95] | 41 | + | [95] | 36 | + | [95] | 41 | + | [95] | 30 | + | [100] | 31 | + | [95, 95, 95] | 41 | + | [95, 95] | 36 | + | [95, 95] | 41 | + | [95] | 42 | + | [95] | 41 | + | [99, 95, 95, 95] | 41 | + | [99, 95, 95, 95] | 41 | + | [99, 95, 95] | 36 | + | [99, 95, 95] | 41 | + | [99, 95, 95] | 42 | + | [99, 95, 95] | 41 | + | [99, 95] | 42 | + | [99, 95] | 41 | + | [99, 95] | 36 | + | [99, 95] | 41 | + | [99, 100] | 43 | + | [99] | 36 | + | [99] | 42 | + | [99] | 30 | + | [99] | 32 | + | [99] | 41 | + | [99] | 34 | + | [99] | 25 | + | [99] | 30 | + | [99] | 29 | + | [99] | 31 | + | [99] | 33 | + And the execution plan should be: + | id | name | dependencies | profiling data | operator info | + | 7 | Project | 15 | | | + | 15 | Filter | 11 | | {"condition": "(($-.n.player.age>0)==true)"} | + | 11 | AppendVertices | 14 | | | + | 14 | Filter | 13 | | {"condition": "(size($-.e)>0)"} | + | 13 | Traverse | 1 | | {"edge filter": "(*.likeness>90)"} | + | 1 | IndexScan | 2 | | | + | 2 | Start | | | | + When profiling query: + """ + MATCH p= (v:player)-[e:like*1..5]->(n) + WHERE all(i in e where i.likeness>90) AND size(e)>0 AND (n.player.age>0) == true + RETURN [i in e | i.likeness] AS likeness, n.player.age AS nage + """ + Then the result should be, in any order: + | likeness | nage | + | [99, 99] | 34 | + | [99] | 38 | + | [99, 99] | 38 | + | [99] | 34 | + | [100] | 43 | + | [95, 95, 95] | 41 | + | [95, 95] | 42 | + | [95, 95] | 41 | + | [95] | 36 | + | [95] | 41 | + | [95] | 30 | + | [100] | 31 | + | [95, 95, 95] | 41 | + | [95, 95] | 36 | + | [95, 95] | 41 | + | [95] | 42 | + | [95] | 41 | + | [99, 95, 95, 95] | 41 | + | [99, 95, 95, 95] | 41 | + | [99, 95, 95] | 36 | + | [99, 95, 95] | 41 | + | [99, 95, 95] | 42 | + | [99, 95, 95] | 41 | + | [99, 95] | 42 | + | [99, 95] | 41 | + | [99, 95] | 36 | + | [99, 95] | 41 | + | [99, 100] | 43 | + | [99] | 36 | + | [99] | 42 | + | [99] | 30 | + | [99] | 32 | + | [99] | 41 | + | [99] | 34 | + | [99] | 25 | + | [99] | 30 | + | [99] | 29 | + | [99] | 31 | + | [99] | 33 | + And the execution plan should be: + | id | name | dependencies | profiling data | operator info | + | 7 | Project | 15 | | | + | 15 | Filter | 11 | | {"condition": "(($-.n.player.age>0)==true)"} | + | 11 | AppendVertices | 14 | | | + | 14 | Filter | 13 | | {"condition": "(size($-.e)>0)"} | + | 13 | Traverse | 1 | | {"filter": "(like.likeness>90)"} | + | 1 | IndexScan | 2 | | | + | 2 | Start | | | | + When profiling query: + """ + MATCH p= (v:player)-[e:like*0..5]->(n) + WHERE all(i in e where i.likeness>90) AND not all(i in e where i.likeness > 89) + RETURN [i in e | i.likeness] AS likeness, n.player.age AS nage + """ + Then the result should be, in any order: + | likeness | nage | + And the execution plan should be: + | id | name | dependencies | profiling data | operator info | + | 7 | Project | 11 | | | + | 11 | AppendVertices | 14 | | | + | 14 | Filter | 13 | | {"condition": "!(all(__VAR_1 IN $-.e WHERE ($__VAR_1.likeness>89)))"} | + | 13 | Traverse | 1 | | {"edge filter": "(*.likeness>90)"} | + | 1 | IndexScan | 2 | | | + | 2 | Start | | | | + When profiling query: + """ + MATCH p1= (person:player)-[e1:like*1..2]-(friend:player) + WHERE id(person) == "Tony Parker" AND id(friend) != "Tony Parker" AND all(i in e1 where i.likeness > 0) + MATCH p2= (friend)-[served:serve]->(friendTeam:team) + WHERE served.start_year > 2010 AND all(i in e1 where i.likeness > 1) + WITH DISTINCT friend, friendTeam + OPTIONAL MATCH p3= (friend)<-[e2:like*2..4]-(friend2:player)<-[:like]-(friendTeam) + 
WITH friendTeam, count(friend2) AS numFriends, e2 + WHERE e2 IS NULL OR all(i in e2 where i IS NULL) + RETURN + friendTeam.team.name AS teamName, + numFriends, + [i in e2 | i.likeness] AS likeness + ORDER BY teamName DESC + LIMIT 8 + """ + Then the result should be, in any order, with relax comparison: + | teamName | numFriends | likeness | + | "Warriors" | 0 | NULL | + | "Trail Blazers" | 0 | NULL | + | "Spurs" | 0 | NULL | + | "Rockets" | 0 | NULL | + | "Raptors" | 0 | NULL | + | "Pistons" | 0 | NULL | + | "Lakers" | 0 | NULL | + | "Kings" | 0 | NULL | + And the execution plan should be: + | id | name | dependencies | profiling data | operator info | + | 28 | TopN | 24 | | | + | 24 | Project | 30 | | | + | 30 | Aggregate | 29 | | | + | 29 | Filter | 44 | | {"condition": "($e2 IS NULL OR all(__VAR_2 IN $e2 WHERE $__VAR_2 IS NULL))"} | + | 44 | HashLeftJoin | 15,43 | | | + | 15 | Dedup | 14 | | | + | 14 | Project | 35 | | | + | 35 | HashInnerJoin | 53,50 | | | + | 53 | Project | 53 | | | + | 53 | Filter | 52 | | {"condition": "(id($-.friend)!=\"Tony Parker\")"} | + | 52 | AppendVertices | 59 | | | + | 59 | Filter | 60 | | {"condition": "(id($-.person)==\"Tony Parker\")"} | + | 60 | Traverse | 2 | | {"filter": "((like.likeness>0) AND (like.likeness>1))"} | + | 2 | Dedup | 1 | | | + | 1 | PassThrough | 3 | | | + | 3 | Start | | | | + | 50 | Project | 55 | | | + | 55 | AppendVertices | 61 | | | + | 61 | Traverse | 8 | | | + | 8 | Argument | | | | + | 43 | Project | 39 | | | + | 39 | AppendVertices | 39 | | | + | 39 | Traverse | 17 | | | + | 17 | Traverse | 16 | | | + | 16 | Argument | | | | diff --git a/tests/tck/features/optimizer/PushFilterDownHashInnerJoinRule.feature b/tests/tck/features/optimizer/PushFilterDownHashInnerJoinRule.feature index b4916b86299..77c90b229fd 100644 --- a/tests/tck/features/optimizer/PushFilterDownHashInnerJoinRule.feature +++ b/tests/tck/features/optimizer/PushFilterDownHashInnerJoinRule.feature @@ -263,19 +263,18 @@ Feature: Push Filter down HashInnerJoin rule | [:like "Dejounte Murray"->"Tim Duncan" @0 {likeness: 99}] | ("Dejounte Murray" :player{age: 29, name: "Dejounte Murray"}) | | [:like "Dejounte Murray"->"Tim Duncan" @0 {likeness: 99}] | ("Dejounte Murray" :player{age: 29, name: "Dejounte Murray"}) | And the execution plan should be: - | id | name | dependencies | operator info | - | 20 | TopN | 15 | | - | 15 | Project | 14 | | - | 14 | Filter | 13 | { "condition": "(($e.likeness>90) OR (vv.team.start_year>2000))" } | - | 13 | HashInnerJoin | 16,12 | | - | 16 | Project | 5 | | - | 5 | AppendVertices | 17 | | - | 17 | Traverse | 2 | | - | 2 | Dedup | 1 | | - | 1 | PassThrough | 3 | | - | 3 | Start | | | - | 12 | Project | 11 | | - | 11 | AppendVertices | 10 | | - | 10 | Traverse | 8 | | - | 8 | Argument | 9 | | - | 9 | Start | | | + | id | name | dependencies | operator info | + | 20 | TopN | 15 | | + | 15 | Project | 13 | | + | 13 | HashInnerJoin | 16,12 | | + | 16 | Project | 5 | | + | 5 | AppendVertices | 17 | | + | 17 | Traverse | 2 | | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 3 | | + | 3 | Start | | | + | 12 | Project | 11 | | + | 11 | AppendVertices | 10 | | + | 10 | Traverse | 8 | | + | 8 | Argument | 9 | | + | 9 | Start | | |