diff --git a/.github/workflows/build-graphscope-images-linux.yml b/.github/workflows/build-graphscope-images-linux.yml index e91af07bdf00..f41f1d82bcd7 100644 --- a/.github/workflows/build-graphscope-images-linux.yml +++ b/.github/workflows/build-graphscope-images-linux.yml @@ -95,7 +95,7 @@ jobs: sudo docker push ${{ env.REGISTRY }}/graphscope/learning:${tag} # dataset image - # Note! dataset image are built mannually just use the latest one. + # Note! dataset image are built manually just use the latest one. sudo docker pull ${{ env.REGISTRY }}/graphscope/dataset:latest sudo docker tag ${{ env.REGISTRY }}/graphscope/dataset:latest ${{ env.REGISTRY }}/graphscope/dataset:${tag} sudo docker push ${{ env.REGISTRY }}/graphscope/dataset:${tag} @@ -134,7 +134,7 @@ jobs: sudo docker push ${{ env.REGISTRY }}/graphscope/learning:${tag} # dataset image - # Note! dataset image are built mannually just use the latest one. + # Note! dataset image are built manually just use the latest one. sudo docker pull ${{ env.REGISTRY }}/graphscope/dataset:latest sudo docker tag ${{ env.REGISTRY }}/graphscope/dataset:latest ${{ env.REGISTRY }}/graphscope/dataset:${tag} sudo docker push ${{ env.REGISTRY }}/graphscope/dataset:${tag} diff --git a/.github/workflows/build-graphscope-wheels-linux.yml b/.github/workflows/build-graphscope-wheels-linux.yml index 04e40cce9bae..2f13331bc8de 100644 --- a/.github/workflows/build-graphscope-wheels-linux.yml +++ b/.github/workflows/build-graphscope-wheels-linux.yml @@ -57,7 +57,7 @@ jobs: tar -zcf client.tar.gz python/dist/wheelhouse/*.whl tar -zcf graphscope.tar.gz coordinator/dist/ - # move wheels into one floder to upload to PyPI + # move wheels into one folder to upload to PyPI mkdir ${GITHUB_WORKSPACE}/upload_pypi mv ${GITHUB_WORKSPACE}/python/dist/wheelhouse/*.whl ${GITHUB_WORKSPACE}/upload_pypi/ mv ${GITHUB_WORKSPACE}/coordinator/dist/wheelhouse/*.whl ${GITHUB_WORKSPACE}/upload_pypi/ @@ -139,7 +139,7 @@ jobs: tar -zcf client.tar.gz 
python/dist/wheelhouse/*.whl tar -zcf graphscope.tar.gz coordinator/dist/ - # move wheels into one floder to upload to PyPI + # move wheels into one folder to upload to PyPI mkdir ${GITHUB_WORKSPACE}/upload_pypi mv ${GITHUB_WORKSPACE}/python/dist/wheelhouse/*.whl ${GITHUB_WORKSPACE}/upload_pypi/ mv ${GITHUB_WORKSPACE}/coordinator/dist/wheelhouse/*.whl ${GITHUB_WORKSPACE}/upload_pypi/ diff --git a/.github/workflows/build-graphscope-wheels-macos.yml b/.github/workflows/build-graphscope-wheels-macos.yml index 05f218b64739..8eff736d7c1d 100644 --- a/.github/workflows/build-graphscope-wheels-macos.yml +++ b/.github/workflows/build-graphscope-wheels-macos.yml @@ -245,7 +245,7 @@ jobs: env: PYTHON: ${{ matrix.python-version }} run: | - # move wheels into one floder to upload to PyPI + # move wheels into one folder to upload to PyPI mkdir ${GITHUB_WORKSPACE}/upload_pypi cd ${GITHUB_WORKSPACE}/artifacts diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index c1dabecb7f45..c125bdc4a1ee 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -158,7 +158,7 @@ jobs: sudo apt update sudo apt install -y doxygen graphviz - # generate a taged version + # generate a tagged version cd ${GITHUB_WORKSPACE} make graphscope-docs diff --git a/.github/workflows/pegasus.yml b/.github/workflows/pegasus.yml index d947b6e14e4e..31978be2b062 100644 --- a/.github/workflows/pegasus.yml +++ b/.github/workflows/pegasus.yml @@ -56,7 +56,7 @@ jobs: # - name: Detect the tmate session # run: | # if grep -v "grep" .github/workflows/pegasus.yml | grep "action-tmate"; then -# echo 'WARNING!!!the self-hosted machine can not run tmate session, please debug it manually' +# echo 'WARNING!!!the self-hosted machine cannot run tmate session, please debug it manually' # exit 1 # fi # diff --git a/Makefile b/Makefile index ce95851582a3..88e209771052 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ PIP_ARGS = --timeout=1000 --no-cache-dir ## Common .PHONY: all graphscope 
install clean -# coordinator relys on client, which relys on learning +# coordinator relies on client, which relies on learning all: coordinator analytical interactive graphscope: all diff --git a/analytical_engine/CMakeLists.txt b/analytical_engine/CMakeLists.txt index 473d7d3a5eb1..fa5853dce7d4 100644 --- a/analytical_engine/CMakeLists.txt +++ b/analytical_engine/CMakeLists.txt @@ -352,7 +352,7 @@ if (${LIBUNWIND_FOUND}) target_link_libraries(grape_engine PRIVATE ${LIBUNWIND_LIBRARIES}) endif () -# An executable to work around for graphx pregel. +# An executable workaround for graphx pregel. if (ENABLE_JAVA_SDK) add_executable(graphx_runner core/java/graphx_runner.cc core/java/javasdk.cc) target_include_directories(graphx_runner PRIVATE core utils apps) @@ -502,7 +502,7 @@ if(ENABLE_JAVA_SDK) set(GAE_JAVA_RUNTIME_JAR "${GAE_JAVA_RUNTIME_DIR}/target/grape-runtime-${GRAPHSCOPE_ANALYTICAL_JAR_VERSION}-shaded.jar") set(GAE_JAVA_GRAPHX_JAR "${GAE_JAVA_DIR}/grape-graphx/target/grape-graphx-${GRAPHSCOPE_ANALYTICAL_JAR_VERSION}-shaded.jar") set(GAE_JAVA_GIRAPH_JAR "${GAE_JAVA_DIR}/grape-giraph/target/grape-giraph-${GRAPHSCOPE_ANALYTICAL_JAR_VERSION}-shaded.jar") - # condiationally set grape-jni's name according to platform + # conditionally set grape-jni's name according to platform if (APPLE) set(GAE_JAVA_JNI_LIB "${GAE_JAVA_DIR}/grape-runtime/target/native/libgrape-jni.dylib") else () diff --git a/analytical_engine/apps/apsp/all_pairs_shortest_path_length.h b/analytical_engine/apps/apsp/all_pairs_shortest_path_length.h index 8d76affb9b8b..fed873d20130 100644 --- a/analytical_engine/apps/apsp/all_pairs_shortest_path_length.h +++ b/analytical_engine/apps/apsp/all_pairs_shortest_path_length.h @@ -64,7 +64,7 @@ class AllPairsShortestPathLength // unweighted graph, use bfs. this->bfs(frag, v, ctx); } else { - // weighted graph, use dijstra. + // weighted graph, use dijkstra. 
this->dijkstraLength(frag, v, ctx); } }); diff --git a/analytical_engine/apps/assortativity/attribute_assortativity.h b/analytical_engine/apps/assortativity/attribute_assortativity.h index b51fbdfa27fd..1060db98e3ec 100644 --- a/analytical_engine/apps/assortativity/attribute_assortativity.h +++ b/analytical_engine/apps/assortativity/attribute_assortativity.h @@ -167,7 +167,7 @@ class AttributeAssortativity /** * @brief merge attribute mixing map of all workers in worker 0 and the result - * is saved in the contxt of worker 0. + * is saved in the context of worker 0. * * @param ctx * @param messages diff --git a/analytical_engine/apps/assortativity/degree_assortativity_coefficient.h b/analytical_engine/apps/assortativity/degree_assortativity_coefficient.h index 6b0f66de39e2..678d407689b7 100644 --- a/analytical_engine/apps/assortativity/degree_assortativity_coefficient.h +++ b/analytical_engine/apps/assortativity/degree_assortativity_coefficient.h @@ -145,7 +145,7 @@ class DegreeAssortativity * @brief get the degree of vertex * * @param frag - * @param veretx + * @param vertex * @param type IN or OUT * @param ctx */ diff --git a/analytical_engine/apps/assortativity/utils.h b/analytical_engine/apps/assortativity/utils.h index 85861b628c9d..9ff9b11ae2a2 100644 --- a/analytical_engine/apps/assortativity/utils.h +++ b/analytical_engine/apps/assortativity/utils.h @@ -80,7 +80,7 @@ double ProcessMatrix(std::vector>& degree_mixing_matrix, } /** - * @brief deterimine if type T can convert to type U in compile-time. + * @brief determine if type T can convert to type U in compile-time. 
* * @tparam T * @tparam U diff --git a/analytical_engine/apps/centrality/betweenness/betweenness_centrality_generic.h b/analytical_engine/apps/centrality/betweenness/betweenness_centrality_generic.h index 8aa984b2d66c..4ce97d44a920 100644 --- a/analytical_engine/apps/centrality/betweenness/betweenness_centrality_generic.h +++ b/analytical_engine/apps/centrality/betweenness/betweenness_centrality_generic.h @@ -69,7 +69,7 @@ class BetweennessCentralityGeneric // unweighted graph, use bfs. this->bfs(frag, v, ctx); } else { - // weighted graph, use dijstra. + // weighted graph, use dijkstra. this->dijkstra(frag, v, ctx); } }); diff --git a/analytical_engine/apps/java_pie/java_pie_property_parallel_app.h b/analytical_engine/apps/java_pie/java_pie_property_parallel_app.h index a3f715005359..5d7702d5563b 100644 --- a/analytical_engine/apps/java_pie/java_pie_property_parallel_app.h +++ b/analytical_engine/apps/java_pie/java_pie_property_parallel_app.h @@ -33,7 +33,7 @@ limitations under the License. namespace gs { /** - * @brief This is a driver app for Java property prallel app. The driven java + * @brief This is a driver app for Java property parallel app. The driven java * app should be inherited from ParallelPropertyAppBase. 
* * @tparam FRAG_T Should be vineyard::ArrowFragment<...> diff --git a/analytical_engine/apps/pregel/aggregators_test.h b/analytical_engine/apps/pregel/aggregators_test.h index 2141391d6058..ba1bfefaef1d 100644 --- a/analytical_engine/apps/pregel/aggregators_test.h +++ b/analytical_engine/apps/pregel/aggregators_test.h @@ -146,7 +146,7 @@ class AggregatorsTest assert(rlt.size() == 81308); } - // terminate itera + // terminate iteration v.vote_to_halt(); } } diff --git a/analytical_engine/apps/pregel/louvain/louvain.h b/analytical_engine/apps/pregel/louvain/louvain.h index fc29c11ccbed..ce0a5a0037c5 100644 --- a/analytical_engine/apps/pregel/louvain/louvain.h +++ b/analytical_engine/apps/pregel/louvain/louvain.h @@ -135,7 +135,7 @@ class PregelLouvain v.vote_to_halt(); return; } - // at the start of each full pass check to see wether progress is still + // at the start of each full pass check to see whether progress is still // being made, if not halt if (current_minor_step == phase_one_minor_step_1 && current_iteration > 0 && current_iteration % 2 == 0) { diff --git a/analytical_engine/apps/pregel/louvain/louvain_vertex.h b/analytical_engine/apps/pregel/louvain/louvain_vertex.h index 8c84abc2502a..3f04e1a30433 100644 --- a/analytical_engine/apps/pregel/louvain/louvain_vertex.h +++ b/analytical_engine/apps/pregel/louvain/louvain_vertex.h @@ -29,7 +29,7 @@ limitations under the License. namespace gs { /** - * @brief LouvainVertex is a specific PregelVertex for louvain alorithm. + * @brief LouvainVertex is a specific PregelVertex for louvain algorithm. * LouvainVertex provides communication-related method to send messages to * certain fragment and also access to context of louvain. 
* @tparam FRAG_T diff --git a/analytical_engine/cmake/FindRdkafka.cmake b/analytical_engine/cmake/FindRdkafka.cmake index f53b0e3cf734..764576ad36bc 100644 --- a/analytical_engine/cmake/FindRdkafka.cmake +++ b/analytical_engine/cmake/FindRdkafka.cmake @@ -1,4 +1,4 @@ -# This file is used to find librdkafka library in CMake script, modifeid from the +# This file is used to find librdkafka library in CMake script, modified from the # code from # # https://github.com/BVLC/caffe/blob/master/cmake/Modules/FindGlog.cmake diff --git a/analytical_engine/core/context/java_context_base.h b/analytical_engine/core/context/java_context_base.h index 021af7f27611..d31b96c3c142 100644 --- a/analytical_engine/core/context/java_context_base.h +++ b/analytical_engine/core/context/java_context_base.h @@ -140,7 +140,7 @@ class JavaContextBase : public grape::ContextBase { protected: virtual const char* evalDescriptor() = 0; - // Set frag_group_id to zero inidicate not available. + // Set frag_group_id to zero indicates not available. void init(jlong messages_addr, const char* java_message_manager_name, const std::string& params, const std::string& lib_path, int local_num = 1) { @@ -209,7 +209,7 @@ class JavaContextBase : public grape::ContextBase { { jobject json_object = createArgsObject(env, args_str); // 3.1 If we find a setClassLoaderMethod, then we invoke.(NOt - // neccessary) this is specially for giraph adaptors + // necessary) this is specially for giraph adaptors setContextClassLoader(env, context_class); // 4. Invoke java method @@ -279,7 +279,7 @@ class JavaContextBase : public grape::ContextBase { return std::string(user_class_path); } // user library name should be absolute - // serial path is used in graphx, to specify the path to serializaed class + // serial path is used in graphx, to specify the path to serialized class // objects of vd,ed.etc. 
std::string parseParamsAndSetupJVMEnv(const std::string& params, const std::string lib_path, @@ -342,7 +342,7 @@ class JavaContextBase : public grape::ContextBase { // vineyard_id(frag_group_id) // pt.put("vineyard_id", frag_group_id); - // JVM runtime opt should consists of java.libaray.path and + // JVM runtime opt should consist of java.library.path and // java.class.path maybe this should be set by the backend not user. std::string grape_jvm_opt = generate_jvm_opts(); if (!grape_jvm_opt.empty()) { diff --git a/analytical_engine/core/fragment/arrow_flattened_fragment.h b/analytical_engine/core/fragment/arrow_flattened_fragment.h index 7be36ae4eae4..2dcafd41237c 100644 --- a/analytical_engine/core/fragment/arrow_flattened_fragment.h +++ b/analytical_engine/core/fragment/arrow_flattened_fragment.h @@ -242,7 +242,7 @@ struct NbrDefault { }; /** - * @brief Union of all iteratable adjencent lists of a vertex. The union + * @brief Union of all iteratable adjacent lists of a vertex. The union * list contains all neighbors in format of NbrDefault, which contains the other * Node and the data on the Edge. The lists must be non-empty to construct the * UnionAdjList. @@ -313,7 +313,7 @@ class UnionAdjList { pointer_type operator->() noexcept { return &curr_nbr_; } - // The only the interator's `operator++()` is exposed to the external + // Only the iterator's `operator++()` is exposed to the external // programs so we only need to check the validity here, and nothing // to do with the `operator++()` of NbrDefault. inline void move_to_next_valid_nbr() { @@ -387,7 +387,7 @@ class UnionAdjList { pointer_type operator->() noexcept { return curr_nbr_; } - // The only the interator's `operator++()` is exposed to the external + // Only the iterator's `operator++()` is exposed to the external // programs so we only need to check the validity here, and nothing // to do with the `operator++()` of NbrDefault. 
inline void move_to_next_valid_nbr() { diff --git a/analytical_engine/core/fragment/arrow_projected_fragment.h b/analytical_engine/core/fragment/arrow_projected_fragment.h index 8a1e2f368110..7cf001e185d6 100644 --- a/analytical_engine/core/fragment/arrow_projected_fragment.h +++ b/analytical_engine/core/fragment/arrow_projected_fragment.h @@ -1237,7 +1237,7 @@ class ArrowProjectedFragment inline size_t GetOutEdgeNum() const { return oenum_; } - /* Get outging edges num from this frag*/ + /* Get outgoing edges num from this frag*/ inline size_t GetOutgoingEdgeNum() const { return static_cast(oe_offsets_end_->Value(ivnum_ - 1) - oe_offsets_begin_->Value(0)); @@ -1315,8 +1315,8 @@ class ArrowProjectedFragment return vm_ptr_->GetGid(internal_oid_t(oid), gid); } - // For Java use, can not use Oid2Gid(const oid_t & oid, vid_t & gid) since - // Java can not pass vid_t by reference. + // For Java use, cannot use Oid2Gid(const oid_t & oid, vid_t & gid) since + // Java cannot pass vid_t by reference. inline vid_t Oid2Gid(const oid_t& oid) const { vid_t gid; if (vm_ptr_->GetGid(internal_oid_t(oid), gid)) { diff --git a/analytical_engine/core/java/java_messages.h b/analytical_engine/core/java/java_messages.h index ac058ba19dfb..04a17c651e93 100644 --- a/analytical_engine/core/java/java_messages.h +++ b/analytical_engine/core/java/java_messages.h @@ -26,7 +26,7 @@ namespace gs { /** - * @brief Since Java can not pass Double, Long as reference, we need a wrapper + * @brief Since Java cannot pass Double, Long as reference, we need a wrapper * for primitives. 
*/ template diff --git a/analytical_engine/core/server/graphscope_service.cc b/analytical_engine/core/server/graphscope_service.cc index 3e646a8ffae6..c4de7b9be83b 100644 --- a/analytical_engine/core/server/graphscope_service.cc +++ b/analytical_engine/core/server/graphscope_service.cc @@ -121,7 +121,7 @@ ::grpc::Status GraphScopeService::RunStep( if (!success) { op_result->set_error_msg(error_msgs); - // break dag exection flow + // break dag execution flow stream->Write(response_head); return Status(StatusCode::INTERNAL, error_msgs); } diff --git a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/app/GiraphComputationAdaptor.java b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/app/GiraphComputationAdaptor.java index 8995c6dc2737..0c1320f9ca43 100644 --- a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/app/GiraphComputationAdaptor.java +++ b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/app/GiraphComputationAdaptor.java @@ -170,7 +170,7 @@ public void PEval( userComputation.incStep(); workerContext.setCurStep(1); - // We can not judge whether to proceed by messages sent and check halted array. + // We cannot judge whether to proceed by messages sent and check halted array. 
logger.info( "Any msg received: {} all halted {}", giraphMessageManager.anyMessageReceived(), diff --git a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/graph/GiraphEdgeManager.java b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/graph/GiraphEdgeManager.java index 42444108b327..d6fa17c2ed43 100644 --- a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/graph/GiraphEdgeManager.java +++ b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/graph/GiraphEdgeManager.java @@ -70,7 +70,7 @@ public interface GiraphEdgeManager diff --git a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/cache/impl/BatchWritableMessageCache.java b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/cache/impl/BatchWritableMessageCache.java index aae65d1b128b..d2e95966b5aa 100644 --- a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/cache/impl/BatchWritableMessageCache.java +++ b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/cache/impl/BatchWritableMessageCache.java @@ -67,7 +67,7 @@ public BatchWritableMessageCache( cache = new Gid2Data[fragNum]; for (int i = 0; i < fragNum; ++i) { if (i == fragId) { - // Message to self can be resiable. + // Message to self can be resizable. 
cache[i] = Gid2Data.newResizable(cacheSize); } else { cache[i] = Gid2Data.newFixed(cacheSize); diff --git a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/mm/GiraphMessageManager.java b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/mm/GiraphMessageManager.java index 0236330283d7..cd18a672d3be 100644 --- a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/mm/GiraphMessageManager.java +++ b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/mm/GiraphMessageManager.java @@ -46,7 +46,7 @@ public interface GiraphMessageManager< * Check any message available on this vertex. * * @param lid local id - * @return true if recevied messages. + * @return true if received messages. */ boolean messageAvailable(long lid); diff --git a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/mm/impl/AbstractMessageManager.java b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/mm/impl/AbstractMessageManager.java index 2bb305e23c35..0b6af60edc29 100644 --- a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/mm/impl/AbstractMessageManager.java +++ b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/mm/impl/AbstractMessageManager.java @@ -132,7 +132,7 @@ public Iterable getMessages(long lid) { * Check any message available on this vertex. * * @param lid local id - * @return true if recevied messages. + * @return true if received messages. 
*/ @Override public boolean messageAvailable(long lid) { diff --git a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/netty/NettyClient.java b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/netty/NettyClient.java index ef35328cb932..e6c76f332cd0 100644 --- a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/netty/NettyClient.java +++ b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/parallel/netty/NettyClient.java @@ -264,7 +264,7 @@ private void waitAllConnections() { try { TimeUnit.SECONDS.sleep(1); failedCnt += 1; - // When encounter failure, we update the futreu; + // When encounter failure, we update the future; ChannelFuture newFuture = bootstrap.connect(connection.address); connection.updateFuture(newFuture); } catch (InterruptedException e) { diff --git a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2Data.java b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2Data.java index c82143925171..490b917c4c5c 100644 --- a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2Data.java +++ b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2Data.java @@ -36,7 +36,7 @@ static Gid2Data newFixed(int capacity) { /** * Number of bytes need for serialization. 
* - * @return number of butes + * @return number of bytes */ int serializedSize(); } diff --git a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2DataFixed.java b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2DataFixed.java index 8a1296888ada..c095103c5ef7 100644 --- a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2DataFixed.java +++ b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2DataFixed.java @@ -70,7 +70,7 @@ public int size() { /** * Number of bytes need for serialization. * - * @return number of butes + * @return number of bytes */ @Override public int serializedSize() { diff --git a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2DataResizable.java b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2DataResizable.java index e9955c147ef3..836f22c55355 100644 --- a/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2DataResizable.java +++ b/analytical_engine/java/grape-giraph/src/main/java/com/alibaba/graphscope/utils/Gid2DataResizable.java @@ -64,7 +64,7 @@ public int size() { /** * Number of bytes need for serialization. 
* - * @return number of butes + * @return number of bytes */ @Override public int serializedSize() { diff --git a/analytical_engine/java/grape-giraph/src/main/java/org/apache/giraph/conf/GiraphConstants.java b/analytical_engine/java/grape-giraph/src/main/java/org/apache/giraph/conf/GiraphConstants.java index 411eb0ca70c4..a4ce5f219ad4 100644 --- a/analytical_engine/java/grape-giraph/src/main/java/org/apache/giraph/conf/GiraphConstants.java +++ b/analytical_engine/java/grape-giraph/src/main/java/org/apache/giraph/conf/GiraphConstants.java @@ -473,7 +473,7 @@ public interface GiraphConstants { 20, "Maximum bind attempts for different IPC ports"); /** - * Maximum connections trys for client to connect to server + * Maximum connection tries for client to connect to server */ IntConfOption MAX_CONN_TRY_ATTEMPTS = new IntConfOption( diff --git a/analytical_engine/java/grape-graphx/src/main/scala/com/alibaba/graphscope/graphx/GSClientWrapper.scala b/analytical_engine/java/grape-graphx/src/main/scala/com/alibaba/graphscope/graphx/GSClientWrapper.scala index 3ce1c2e5e507..ce83aefc67d3 100644 --- a/analytical_engine/java/grape-graphx/src/main/scala/com/alibaba/graphscope/graphx/GSClientWrapper.scala +++ b/analytical_engine/java/grape-graphx/src/main/scala/com/alibaba/graphscope/graphx/GSClientWrapper.scala @@ -166,7 +166,7 @@ class GSClientWrapper( object GSClientWrapper { val RES_PATTERN = "res_str"; //A safe word which we append to the execution of python code, its appearance in - // output stream, indicating command has been successfully executoed. + // output stream, indicating command has been successfully executed. 
val SAFE_WORD = "Spark-GraphScope-OK" val graphNameCounter = new AtomicInteger(0) val VINEYARD_DEFAULT_SHARED_MEM = "10Gi" diff --git a/analytical_engine/java/grape-graphx/src/main/scala/com/alibaba/graphscope/graphx/rdd/impl/GrapeVertexPartition.scala b/analytical_engine/java/grape-graphx/src/main/scala/com/alibaba/graphscope/graphx/rdd/impl/GrapeVertexPartition.scala index f69a6e6b6e7a..a1ae97ccad02 100644 --- a/analytical_engine/java/grape-graphx/src/main/scala/com/alibaba/graphscope/graphx/rdd/impl/GrapeVertexPartition.scala +++ b/analytical_engine/java/grape-graphx/src/main/scala/com/alibaba/graphscope/graphx/rdd/impl/GrapeVertexPartition.scala @@ -513,7 +513,7 @@ object GrapeVertexPartition extends Logging { /** @param pid * @param initialized - * inidicate whether the pushed vertex data store has been filled with data array in shuffle + * indicates whether the pushed vertex data store has been filled with data array in shuffle * @param store */ def setVertexStore( diff --git a/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/Pregel.scala b/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/Pregel.scala index 2c2d1b0de1c0..244dcf259220 100644 --- a/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/Pregel.scala +++ b/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/Pregel.scala @@ -131,12 +131,12 @@ object Pregel extends Logging { ) /** The Pregel contains the following steps. - * 0) persist vertex data to the memory mapped address. So, that the update data can be readed + * 0) persist vertex data to the memory mapped address. So, that the update data can be read * by later launched mpi processes. * 1) Launch mpi processes to run pie query, which get the fragment ids from graph.fragIds. * 2) at the end of query, we get the result graph.Then how to pass the result graph from mpi * process to graphx executor? 
- * - We Assume common graph computing will not repartition the graph,vertices,and edgs. + * - We Assume common graph computing will not repartition the graph,vertices,and edges. * - We assume no graphx-related communication is not explicitly invoked. * - The computation result is set to vertex data. * diff --git a/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/grape/GrapeGraphImpl.scala b/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/grape/GrapeGraphImpl.scala index 673a0d3a3132..be158cd20abd 100644 --- a/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/grape/GrapeGraphImpl.scala +++ b/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/grape/GrapeGraphImpl.scala @@ -35,7 +35,7 @@ object GrapeGraphBackend extends Enumeration { } /** Creating a graph abstraction by combining vertex RDD and edge RDD together. Before doing this construction: - * - Both vertex RDD and edge RDD are available for map,fliter operators. + * - Both vertex RDD and edge RDD are available for map,filter operators. * - Both vertex RDD and edge RDD stores data in partitions When construct this graph, we will * - copy data out to shared-memory * - create mpi processes to load into fragment. diff --git a/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/grape/impl/GrapeVertexRDDImpl.scala b/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/grape/impl/GrapeVertexRDDImpl.scala index 786d8669d3c4..ead66f66bfb7 100644 --- a/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/grape/impl/GrapeVertexRDDImpl.scala +++ b/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/grape/impl/GrapeVertexRDDImpl.scala @@ -170,7 +170,7 @@ class GrapeVertexRDDImpl[VD] private[graphx] ( * - For offHeap rdd ,just do the aggregation, since its partitioner is same with use, i.e. hashPartition with * fnum = num of workers. 
* - For common one, we need to make sure they share the same num of partitions. and then we repartition to - * size of excutors. + * size of executors. * @param messages * @param reduceFunc * @tparam VD2 diff --git a/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala b/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala index 071110e6162c..814b3d616da3 100644 --- a/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala +++ b/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala @@ -65,7 +65,7 @@ import scala.reflect.ClassTag * neighbors which link to `i` and `outDeg[j]` is the out degree of vertex `j`. * * @note This is not the "normalized" PageRank and as a consequence pages that have no - * inlinks will have a PageRank of alpha. + * in-links will have a PageRank of alpha. */ object PageRank extends Logging { diff --git a/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/sql/GSSparkSession.scala b/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/sql/GSSparkSession.scala index 0d5b5a115472..0ca6d0e72c3b 100644 --- a/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/sql/GSSparkSession.scala +++ b/analytical_engine/java/grape-graphx/src/main/scala/org/apache/spark/sql/GSSparkSession.scala @@ -98,7 +98,7 @@ class GSSparkSession(sparkContext: SparkContext) extends SparkSession(sparkConte } /** Similar to methods defined in GraphLoader, same signature but read the edges to GraphScope store. Although - * the Graph structure is stored in c++, with wrappers based on JNI, RDD can not differ GS-based RDD with graphx + * the Graph structure is stored in c++, with wrappers based on JNI, RDD cannot differ GS-based RDD with graphx * rdd in java heap. 
*/ def edgeListFile( @@ -751,7 +751,7 @@ object GSSparkSession extends Logging { */ def master(master: String): Builder = config("spark.master", master) - /** GraphgScope related param, setting vineyard memroy size. + /** GraphScope related param, setting vineyard memory size. */ def vineyardMemory(memoryStr: String): Builder = config("spark.gs.vineyard.memory", memoryStr) diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/communication/Communicator.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/communication/Communicator.java index 0a996a57c503..7184da0becc9 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/communication/Communicator.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/communication/Communicator.java @@ -39,7 +39,7 @@ public FFICommunicator getFFICommunicator() { } /** - * This function is set private, not intended to be invokede by user. It is meat to only be + * This function is set private, not intended to be invoked by user. It is meat to only be * called by jni, and let the exceptions accepted by cpp, so they can be obviously displayed. * * @param appAddr the address of the c++ app instance. diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/communication/FFICommunicator.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/communication/FFICommunicator.java index 0f9f1598ffcf..5ff1024e2268 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/communication/FFICommunicator.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/communication/FFICommunicator.java @@ -33,7 +33,7 @@ * href="https://github.com/alibaba/libgrape-lite/blob/master/grape/communication/communicator.h">grape * Communicator class, which provides some useful aggregators. Shall not be used by user. * - *

For User-defined msg types, the corrsponding code is generated by graphscope-processor. + *

For User-defined msg types, the corresponding code is generated by graphscope-processor. */ @FFIGen @CXXHead(GRAPE_COMMUNICATOR_H) diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/GrapeNbr.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/GrapeNbr.java index ab09b56495bd..3e314f7af1cf 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/GrapeNbr.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/GrapeNbr.java @@ -30,7 +30,7 @@ import com.alibaba.fastffi.FFITypeAlias; /** - * Java wrpper for grape::Nbr, + * Java wrapper for grape::Nbr, * representing an edge with dst vertex and edge data. * diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/ProjectedAdjList.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/ProjectedAdjList.java index 1b8185c27044..2a2ad3aab597 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/ProjectedAdjList.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/ProjectedAdjList.java @@ -44,7 +44,7 @@ public interface ProjectedAdjList extends FFIPointer { /** - * Get the the first Nbr. + * Get the first Nbr. * * @return first Nbr. */ diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/ProjectedNbr.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/ProjectedNbr.java index c4c66f8221a9..1fa54747b8bf 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/ProjectedNbr.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/ProjectedNbr.java @@ -62,7 +62,7 @@ public interface ProjectedNbr extends NbrBase { /** * Self increment. * - * @return increated pointer. + * @return incremented pointer.
*/ @CXXOperator("++") @CXXReference diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/VertexSet.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/VertexSet.java index 9b24d2e78561..ba1c078d5ce8 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/VertexSet.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/ds/VertexSet.java @@ -38,7 +38,7 @@ public class VertexSet { private Bitset bs; private static Bitset.Factory factory = FFITypeFactory.getFactory("grape::Bitset"); private long left; - // right is exclusived + // right is exclusive private long right; public VertexSet(int start, int end) { @@ -101,7 +101,7 @@ public void set(int vid) { /** * This function is not thread safe, even you are assigning threads with segmented partition. * Because java {@code Bitset} init the {@code wordinUse = 0} and adaptively increases it, - * {@code ensureCapcity} is to be invoked, causing problem. So we access the highest bit in + * {@code ensureCapacity} is to be invoked, causing problem. So we access the highest bit in * initializing. * * @param vertex input vertex. diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/fragment/ArrowFragment.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/fragment/ArrowFragment.java index 332a2b0da5f2..8fa300f166c5 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/fragment/ArrowFragment.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/fragment/ArrowFragment.java @@ -143,7 +143,7 @@ long vertexOffset( * * @param labelId label for oid. * @param oid querying oid. - * @param vertex vertex hanlder + * @param vertex vertex handler * @return true if vertex with original id oid exists in this fragment. 
*/ @FFINameAlias("GetVertex") @@ -272,7 +272,7 @@ OID_T getOuterVertexOid( @FFINameAlias("Gid2Oid") OID_T gid2Oid(Long gid); - // Oid2Gid can not be made in java, since java pass primitives, even long in + // Oid2Gid cannot be made in java, since java pass primitives, even long in // value. // @FFINameAlias("Oid2Gid") // boolean oid2Gid(int vertexLabelId, OID_T oid, Long gid); diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/graph/AbstractEdgeManager.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/graph/AbstractEdgeManager.java index b8b934a5f229..58a1ed6977d3 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/graph/AbstractEdgeManager.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/graph/AbstractEdgeManager.java @@ -207,7 +207,7 @@ public CSRHolder( numOfEdges = new long[(int) innerVerticesNum]; nbrPositions = new int[(int) innerVerticesNum]; // marks the mapping between lid to start pos of nbr, i.e. offset. - // the reason why we don't resuse oeBegin Offset is that eid may not sequential. + // the reason why we don't reuse oeBegin Offset is that eid may not sequential. 
// edatas = (BIZ_EDATA_T[]) Array.newInstance(bizEdataClass, // (int)totalNumOfEdges); // edatas = (BIZ_EDATA_T[]) new Object[(int) totalNumOfEdges]; diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/DefaultMessageManager.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/DefaultMessageManager.java index da4caa8f9984..224c460a2837 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/DefaultMessageManager.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/DefaultMessageManager.java @@ -207,7 +207,7 @@ void syncStateOnOuterVertexArrowP @CXXReference MSG_T msg); /** - * Send the a vertex's data to other fragment througn outgoing edges. + * Send the a vertex's data to other fragment through outgoing edges. * * @param frag ImmutableEdgeCutFragment. * @param vertex querying vertex. @@ -222,7 +222,7 @@ void sendMsgThroughOEdgesImmuta @CXXReference MSG_T msg); /** - * Send the a vertex's data to other fragment throughn outgoing edges. + * Send the a vertex's data to other fragment through outgoing edges. * * @param frag ArrowProjectedFragment. * @param vertex querying vertex. @@ -237,7 +237,7 @@ void sendMsgThroughOEdgesArrowPro @CXXReference MSG_T msg); /** - * Send the a vertex's data to other fragment throughn incoming edges. + * Send the a vertex's data to other fragment through incoming edges. * * @param frag ImmutableEdgecutFragment. * @param vertex querying vertex. @@ -252,7 +252,7 @@ void sendMsgThroughIEdgesImmuta @CXXReference MSG_T msg); /** - * Send the a vertex's data to other fragment throughn incoming edges. + * Send the a vertex's data to other fragment through incoming edges. * * @param frag ArrowProjectedFragment. * @param vertex querying vertex. 
@@ -267,7 +267,7 @@ void sendMsgThroughIEdgesArrowPro @CXXReference MSG_T msg); /** - * Send the a vertex's data to other fragment throughn incoming and outgoing edges. + * Send the a vertex's data to other fragment through incoming and outgoing edges. * * @param frag ImmutableEdgeCutFragment. * @param vertex querying vertex. @@ -282,7 +282,7 @@ void sendMsgThroughEdgesImmutab @CXXReference MSG_T msg); /** - * Send the a vertex's data to other fragment throughn incoming and outgoing edges. + * Send the a vertex's data to other fragment through incoming and outgoing edges. * * @param frag ArrowProjectedFragment. * @param vertex querying vertex. diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelEngine.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelEngine.java index 2a1b385d833e..d310faf08eef 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelEngine.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelEngine.java @@ -356,7 +356,7 @@ public void run() { /** * Apply Triconsumer for each vertex in vertices, in a parallel schema. Used in property - * grpah-app where the vertex id label is needed. + * graph-app where the vertex id label is needed. * * @param vertices VertexRange obj contains querying vertices. * @param vertexLabelId vertex label id. @@ -413,7 +413,7 @@ public void run() { /** * Apply Triconsumer for each vertex in vertices, without checking the vertexSet in a parallel - * schema. Used in property grpah-app where the vertex id label is needed. + * schema. Used in property graph-app where the vertex id label is needed. * * @param vertices VertexRange obj contains querying vertices. * @param vertexLabelId vertex label id. 
diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelMessageManager.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelMessageManager.java index 2a7df569b259..52fdf3d5f0ce 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelMessageManager.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelMessageManager.java @@ -212,7 +212,7 @@ default boolean sendMsgThroughIEdges( void initChannels(int channel_num); /** - * Retrive a message archive. + * Retrieve a message archive. * * @param buf place to store the archive. * @return true if got one. @@ -318,7 +318,7 @@ void syncStateOnOuterVertexArrowProjectedNoMsg( @FFISkip UNUSED_T vdata); /** - * Send the a vertex's data to other fragment througn outgoing edges. + * Send the a vertex's data to other fragment through outgoing edges. * * @param frag ImmutableEdgeCutFragment. * @param vertex querying vertex. @@ -343,7 +343,7 @@ void sendMsgThroughOEdgesImmutable( @FFISkip UNUSED_T unused); /** - * Send the a vertex's data to other fragment througn outgoing edges. + * Send the a vertex's data to other fragment through outgoing edges. * * @param frag ArrowProjectedFragment. * @param vertex querying vertex. @@ -418,7 +418,7 @@ void sendMsgThroughEdgesArrowProjected( @FFISkip UNUSED_T unused); /** - * Send the a vertex's data to other fragment througn incoming edges. + * Send the a vertex's data to other fragment through incoming edges. * * @param frag ImmutableEdgecutFragment. * @param vertex querying vertex. @@ -443,7 +443,7 @@ void sendMsgThroughIEdgesImmutable( @FFISkip UNUSED_T unused); /** - * Send the a vertex's data to other fragment througn incoming edges. + * Send the a vertex's data to other fragment through incoming edges. * * @param message type. * @param frag ArrowProjectedFragment. 
diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelPropertyMessageManager.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelPropertyMessageManager.java index 25457532d4de..314cf640e6b9 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelPropertyMessageManager.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/parallel/ParallelPropertyMessageManager.java @@ -41,7 +41,7 @@ import java.util.function.Supplier; /** - * As PropertyParalleMessager.h has not much difference from ParallelMessageManager. + * As PropertyParallelMessager.h has not much difference from ParallelMessageManager. */ @FFITypeAlias(GS_PARALLEL_PROPERTY_MESSAGE_MANAGER) @CXXHead({ diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/stdcxx/FFIByteVector.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/stdcxx/FFIByteVector.java index 87cecf06ac0f..15cde2c9c951 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/stdcxx/FFIByteVector.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/stdcxx/FFIByteVector.java @@ -155,7 +155,7 @@ public long getRawLong(long arg0) { } /** - * Read serveral bytes to a byte array. + * Read several bytes to a byte array. 
* * @param b Receive data * @param bOff first place to put diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/stdcxx/FakeFFIByteVector.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/stdcxx/FakeFFIByteVector.java index 1cdfae316584..ed5e5beffcfc 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/stdcxx/FakeFFIByteVector.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/stdcxx/FakeFFIByteVector.java @@ -128,7 +128,7 @@ public long getRawLong(long arg0) { } /** - * Read serveral bytes to a byte array. + * Read several bytes to a byte array. * * @param b Receive data * @param bOff first place to put diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/AppContextGetter.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/AppContextGetter.java index 1ff92309e0f6..28a34b528a41 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/AppContextGetter.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/AppContextGetter.java @@ -97,7 +97,7 @@ public static String getPropertyDefaultContextName( */ public static String getDefaultContextName(Class appClass) { // There is a special case: GiraphComputation, which is a driver, since ctx type is - // not specified at compile time, so it is not possible to retrive in a normal way. + // not specified at compile time, so it is not possible to retrieve in a normal way. if (appClass.getName().equals("com.alibaba.graphscope.app.GiraphComputationAdaptor")) { return "com.alibaba.graphscope.context.GiraphComputationAdaptorContext"; } @@ -126,7 +126,7 @@ public static String getParallelContextName(Class app * For parallel property app ,the index of context type in template is 1. * * @param appClass user-defined app class object. - * @return the corrsponding class name. 
+ * @return the corresponding class name. */ public static String getParallelPropertyContextName( Class appClass) { diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/AtomicLongArrayWrapper.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/AtomicLongArrayWrapper.java index 52384c5878b3..28a20bcce4c8 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/AtomicLongArrayWrapper.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/AtomicLongArrayWrapper.java @@ -101,7 +101,7 @@ public void compareAndSet(int ind, long newValue) { } /** - * Atomicl update the array, compare values using unsigned comparasion. + * Atomically update the array, compare values using unsigned comparison. * * @param vertex querying vertex. * @param newValue new value. @@ -116,7 +116,7 @@ public void compareAndSetMinUnsigned(Vertex vertex, long newValue) { } /** - * Atomicl update the array, compare values using unsigned comparasion. + * Atomically update the array, compare values using unsigned comparison. * * @param vertexId querying vertex id. * @param newValue new value. diff --git a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/FFITypeFactoryhelper.java b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/FFITypeFactoryhelper.java index 95e8c3199d5a..0fb44d409851 100644 --- a/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/FFITypeFactoryhelper.java +++ b/analytical_engine/java/grape-jdk/src/main/java/com/alibaba/graphscope/utils/FFITypeFactoryhelper.java @@ -87,7 +87,7 @@ public static String javaType2CppType(Class clz) { } /** - * The created typed array should be set address with baseTypedArray. One can not add more data + * The created typed array should be set address with baseTypedArray. 
One cannot add more data * to this object * @return */ @@ -226,7 +226,7 @@ public static DenseVertexSet.Factory getDenseVertexSetFactory() { } /** - * get the ffiVectorFactor which can produce std::vector, here foreignType can be netsted + * get the ffiVectorFactor which can produce std::vector, here foreignType can be nested * * @param foreignTypeName foreign name (cpp name, full-qualified) * @return Factory instance. @@ -287,7 +287,7 @@ public static PrimitiveMessage newLongPrimitiveMsg() { /** * Create the template msg instance. * - * @param clz element class instace. + * @param clz element class instance. * @param element type * @return created instance. */ @@ -436,7 +436,7 @@ public static DoubleMsg newDoubleMsg(double value) { } /** - * For Any ffi-gened class, we can get the typealias via annotation + * For Any ffi-generated class, we can get the typealias via annotation * * @param ffiPointer Java class generated by ffi. * @return foreignName diff --git a/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/annotation/ArrowProjectedGenerator.java b/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/annotation/ArrowProjectedGenerator.java index 629065ddd161..3849f5b0f21d 100644 --- a/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/annotation/ArrowProjectedGenerator.java +++ b/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/annotation/ArrowProjectedGenerator.java @@ -32,7 +32,7 @@ /** * Class which add typespec to ffiGenBatch for arrowProjectedFragment. In annotation invoker, we - * alread generated some classes, try to avoid regeneration. + * already generated some classes, try to avoid regeneration. * TODO(zhanglei): generate according to message strategy, if provided. 
*/ public class ArrowProjectedGenerator { diff --git a/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/annotation/Main.java b/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/annotation/Main.java index 5b04a5d1c0ec..2a4ce6afe11d 100644 --- a/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/annotation/Main.java +++ b/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/annotation/Main.java @@ -20,7 +20,7 @@ import org.slf4j.LoggerFactory; /** - * A main class demostrate the usage of GraphScopeAppScanner. + * A main class demonstrate the usage of GraphScopeAppScanner. */ public class Main { diff --git a/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/runtime/GraphScopeClassLoader.java b/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/runtime/GraphScopeClassLoader.java index 115d10c1fc33..13bc437cba6d 100644 --- a/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/runtime/GraphScopeClassLoader.java +++ b/analytical_engine/java/grape-runtime/src/main/java/com/alibaba/graphscope/runtime/GraphScopeClassLoader.java @@ -61,7 +61,7 @@ public class GraphScopeClassLoader { * * @param classPath a string will be interpreted as java class path. * @return new classloader with specified classPath. - * @throws IllegalAccessException if ClassScope can not get loaded libraries. + * @throws IllegalAccessException if ClassScope cannot get loaded libraries. */ public static URLClassLoader newGraphScopeClassLoader(String classPath) throws IllegalAccessException { @@ -83,7 +83,7 @@ public static URLClassLoader newGraphScopeClassLoader(String classPath) * Return a default URLClassLoader with no classPath. * * @return the default class loader. - * @throws IllegalAccessException if ClassScope can not get loaded libraries. + * @throws IllegalAccessException if ClassScope cannot get loaded libraries. 
*/ public static URLClassLoader newGraphScopeClassLoader() throws IllegalAccessException { String[] libraries = ClassScope.getLoadedLibraries(ClassLoader.getSystemClassLoader()); @@ -100,7 +100,7 @@ public static URLClassLoader newGraphScopeClassLoader() throws IllegalAccessExce * @param classLoader * @param className a/b/c/ or a.b.c * @return a instance for loaded class. - * @throws ClassNotFoundException if class can not be found in current path. + * @throws ClassNotFoundException if class cannot be found in current path. * @throws InstantiationException if error in creating new instance. * @throws IllegalAccessException if error in creating new instance. */ @@ -126,8 +126,8 @@ public static Object loadAndCreate( * @param foreignName The foreign name for C++ object,shall be fully specified. * @param address The address for C++ object. * @return a FFIPointer wrapper. - * @throws ClassNotFoundException if class can not be found in current path. - * @throws NoSuchMethodException if method for ffi type factory can not be found. + * @throws ClassNotFoundException if class cannot be found in current path. + * @throws NoSuchMethodException if method for ffi type factory cannot be found. * @throws InvocationTargetException if error in invoke the specific method. * @throws IllegalAccessException if error in invoke the specific method. * @throws InstantiationException if error in creating new instance. @@ -153,7 +153,7 @@ public static Object CreateFFIPointer( throw new IllegalArgumentException("Get ffi java class null"); } - // The class loaded by FFITypeFactor's classLoader can not be directly used + // The class loaded by FFITypeFactor's classLoader cannot be directly used // by us. We load again with our class loader. // Class javaClass = classLoader.loadClass(ffiJavaClass.getName()); if (Objects.nonNull(javaClass)) { @@ -177,7 +177,7 @@ public static Object CreateFFIPointer( * @param classLoader url class loader to utilized. 
* @param className full name for java class. * @return loaded class. - * @throws ClassNotFoundException if target class can not be found in current path. + * @throws ClassNotFoundException if target class cannot be found in current path. */ public static Class loadClass(URLClassLoader classLoader, String className) throws ClassNotFoundException { @@ -189,7 +189,7 @@ public static Class loadClass(URLClassLoader classLoader, String className) * * @param classLoader url class loader to utilized. * @return loaded class. - * @throws ClassNotFoundException if target class can not be found in current path. + * @throws ClassNotFoundException if target class cannot be found in current path. */ public static Class loadCommunicatorClass(URLClassLoader classLoader) throws ClassNotFoundException { @@ -290,7 +290,7 @@ private static class ClassScope { } /** - * Get the libraries already loaded in one classLoader. Note that one lib can not be loaded + * Get the libraries already loaded in one classLoader. Note that one lib cannot be loaded * twice via the same class loader. * * @param loader diff --git a/analytical_engine/java/grape-runtime/src/main/native/ffi_byte_vec_vector.cc b/analytical_engine/java/grape-runtime/src/main/native/ffi_byte_vec_vector.cc index 89d2ee33dab1..11e5114ea86a 100644 --- a/analytical_engine/java/grape-runtime/src/main/native/ffi_byte_vec_vector.cc +++ b/analytical_engine/java/grape-runtime/src/main/native/ffi_byte_vec_vector.cc @@ -24,7 +24,7 @@ extern "C" { #endif // Common Stubs -// This file contains neccessary code enableing porting a +// This file contains necessary code enabling porting a // std::vector> to a java byte vecvector, We don't generate // these jni files since the generated Java FFIByteVector class has been // modified for optimization. 
diff --git a/analytical_engine/java/grape-runtime/src/main/native/ffi_byte_vector.cc b/analytical_engine/java/grape-runtime/src/main/native/ffi_byte_vector.cc index 0a4cf5dba57f..b99008640222 100644 --- a/analytical_engine/java/grape-runtime/src/main/native/ffi_byte_vector.cc +++ b/analytical_engine/java/grape-runtime/src/main/native/ffi_byte_vector.cc @@ -25,7 +25,7 @@ extern "C" { #endif // Common Stubs -// This file contains neccessary code enableing porting a +// This file contains necessary code enabling porting a // std::vector> to a java byte vecvector, We don't generate // these jni files since the generated Java FFIByteVector class has been // modified for optimization. diff --git a/analytical_engine/java/grape-runtime/src/main/native/ffi_int_vec_vector.cc b/analytical_engine/java/grape-runtime/src/main/native/ffi_int_vec_vector.cc index 1d828f16553b..868494e6d8e8 100644 --- a/analytical_engine/java/grape-runtime/src/main/native/ffi_int_vec_vector.cc +++ b/analytical_engine/java/grape-runtime/src/main/native/ffi_int_vec_vector.cc @@ -24,7 +24,7 @@ extern "C" { #endif // Common Stubs -// This file contains neccessary code enableing porting a +// This file contains necessary code enabling porting a // std::vector> to a java byte vecvector, We don't generate // these jni files since the generated Java FFIByteVector class has been // modified for optimization. 
diff --git a/analytical_engine/java/grape-runtime/src/main/native/ffi_int_vector.cc b/analytical_engine/java/grape-runtime/src/main/native/ffi_int_vector.cc index 675ecc0d7b51..e16b86459df0 100644 --- a/analytical_engine/java/grape-runtime/src/main/native/ffi_int_vector.cc +++ b/analytical_engine/java/grape-runtime/src/main/native/ffi_int_vector.cc @@ -24,7 +24,7 @@ extern "C" { #endif // Common Stubs -// This file contains neccessary code enableing porting a +// This file contains necessary code enabling porting a // std::vector> to a java byte vecvector, We don't generate // these jni files since the generated Java FFIByteVector class has been // modified for optimization. diff --git a/analytical_engine/java/pom.xml b/analytical_engine/java/pom.xml index 00011a6fdf75..2ac9a3b537b6 100644 --- a/analytical_engine/java/pom.xml +++ b/analytical_engine/java/pom.xml @@ -420,7 +420,7 @@ + of grape-jdk cannot be resolved --> org.codehaus.mojo diff --git a/analytical_engine/java/run_graphx.sh b/analytical_engine/java/run_graphx.sh index 4891cd76dccc..aa765e643f33 100755 --- a/analytical_engine/java/run_graphx.sh +++ b/analytical_engine/java/run_graphx.sh @@ -124,7 +124,7 @@ run_pregel() { } ########################## -# Output useage information. +# Output usage information. # Globals: # None # Arguments: diff --git a/analytical_engine/misc/cpplint.py b/analytical_engine/misc/cpplint.py index d0c8f45a713e..91702bd7cff1 100755 --- a/analytical_engine/misc/cpplint.py +++ b/analytical_engine/misc/cpplint.py @@ -865,7 +865,7 @@ # Files to exclude from linting. This is set by the --exclude flag. _excludes = None -# Whether to supress all PrintInfo messages, UNRELATED to --quiet flag +# Whether to suppress all PrintInfo messages, UNRELATED to --quiet flag _quiet = False # The allowed line length of files. @@ -1258,7 +1258,7 @@ def __init__(self): self._filters_backup = self.filters[:] self.counting = 'total' # In what way are we counting errors? 
self.errors_by_category = {} # string to int dict storing error counts - self.quiet = False # Suppress non-error messagess? + self.quiet = False # Suppress non-error messages? # output format: # "emacs" - format that emacs can parse (default) @@ -1586,7 +1586,7 @@ def RepositoryName(self): repo = FileInfo(_repository).FullName() root_dir = project_dir while os.path.exists(root_dir): - # allow case insensitive compare on Windows + # allow case-insensitive compare on Windows if os.path.normcase(root_dir) == os.path.normcase(repo): return os.path.relpath(fullname, root_dir).replace('\\', '/') one_up_dir = os.path.dirname(root_dir) @@ -3855,7 +3855,7 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error): elif not Match(r'#.*include', line): # Look for < that is not surrounded by spaces. This is only # triggered if both sides are missing spaces, even though - # technically should should flag if at least one side is missing a + # technically it should flag if at least one side is missing a # space. This is done to avoid some false positives with shifts. match = Match(r'^(.*[^\s<])<[^\s=<,]', line) if match: diff --git a/analytical_engine/test/app_tests.sh b/analytical_engine/test/app_tests.sh index 7fa481e9f72a..d6717cd26162 100755 --- a/analytical_engine/test/app_tests.sh +++ b/analytical_engine/test/app_tests.sh @@ -346,7 +346,7 @@ function run_local_vertex_map() { } - # The results of bfs and sssp_path is are non-determinstic. + # The results of bfs and sssp_path are non-deterministic. # The result of bfs is random because diamond-shaped subgraph, # e.g. there are four edges: 1->2, 1->3, 2->4, 3->4. # For vertex 4, result of bfs may come from vertex 2 or vertex 3.
diff --git a/analytical_engine/test/run_java_app.cc b/analytical_engine/test/run_java_app.cc index d5b75037a92a..571080829ac8 100644 --- a/analytical_engine/test/run_java_app.cc +++ b/analytical_engine/test/run_java_app.cc @@ -477,7 +477,7 @@ void Run(vineyard::Client& client, const grape::CommSpec& comm_spec, std::string selector_string; std::string selectors_string; if (run_property == 0) { - // labeled_vetex_data + // labeled_vertex_data selector_string = "r:label0"; { std::vector> selector_list; @@ -521,7 +521,7 @@ void Run(vineyard::Client& client, const grape::CommSpec& comm_spec, std::string selector_string; std::string selectors_string; if (run_property == 0) { - // vetex_data + // vertex_data selector_string = "r"; { std::vector> selector_list; diff --git a/charts/gie-standalone/values.yaml b/charts/gie-standalone/values.yaml index 32e503290708..79c121eb4574 100644 --- a/charts/gie-standalone/values.yaml +++ b/charts/gie-standalone/values.yaml @@ -119,7 +119,7 @@ executor: ## podAntiAffinityPreset: soft - ## Affinity for Graphscope store pods assignment + ## Affinity for GraphScope store pods assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set ## @@ -302,7 +302,7 @@ frontend: ## podAntiAffinityPreset: soft - ## Affinity for Graphscope store pods assignment + ## Affinity for GraphScope store pods assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set ## diff --git a/charts/graphscope-interactive/templates/engine/statefulset.yaml b/charts/graphscope-interactive/templates/engine/statefulset.yaml index 8084aecdc311..bb880113b04d 100644 --- a/charts/graphscope-interactive/templates/engine/statefulset.yaml +++ 
b/charts/graphscope-interactive/templates/engine/statefulset.yaml @@ -71,7 +71,7 @@ spec: echo "${ENGINE_BINARY_PATH} binary not found or not executable, exiting..." exit 1 fi - # always try to load the builin graph: gs_interactive_default_graph + # always try to load the built-in graph: gs_interactive_default_graph # for case CURRENT_GRAPH is not the default_graph, we assume the data is already loaded. # TODO. builtin_graph_schema_path="${INTERACTIVE_WORKSPACE}/data/${DEFAULT_GRAPH_NAME}/graph.yaml" diff --git a/coordinator/gscoordinator/launcher.py b/coordinator/gscoordinator/launcher.py index faa5842ca837..308695c23672 100644 --- a/coordinator/gscoordinator/launcher.py +++ b/coordinator/gscoordinator/launcher.py @@ -47,7 +47,7 @@ def configure_environ(): else: os.environ["OPAL_PREFIX"] = opal_prefix if platform.system() == "Darwin": - # requires on MacOS, but break Kubernetes tests on Linux + # requires on macOS, but break Kubernetes tests on Linux os.environ["OPAL_BINDIR"] = os.path.join(opal_prefix, "bin") os.environ["OPAL_LIBDIR"] = os.path.join(opal_prefix, "lib") os.environ["OPAL_DATADIR"] = os.path.join(opal_prefix, "share") diff --git a/coordinator/gscoordinator/monitor.py b/coordinator/gscoordinator/monitor.py index 1115afe35d94..73669fe00553 100644 --- a/coordinator/gscoordinator/monitor.py +++ b/coordinator/gscoordinator/monitor.py @@ -125,7 +125,7 @@ class Monitor: analyticalRequestCounter = Counter( "analytical_request", "Count requests of analytical requests" ) - # analyticalRequestGauge = Gauge("analytical_request_time", "The analytical opration task time", ["op_name"]) + # analyticalRequestGauge = Gauge("analytical_request_time", "The analytical operation task time", ["op_name"]) analyticalRequestGauge = TemGauge( "analytical_request_time", "The analytical operation task time", ["op_name"] ) diff --git a/coordinator/gscoordinator/template/CMakeLists.template b/coordinator/gscoordinator/template/CMakeLists.template index ff8827e331d9..6852bf70958a 
100644 --- a/coordinator/gscoordinator/template/CMakeLists.template +++ b/coordinator/gscoordinator/template/CMakeLists.template @@ -289,7 +289,7 @@ if (APPLE AND "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") endif() # address the "illegal thread local variable reference to regular symbol" error -# in recent version of brew installed protobuf on MacOS +# in recent version of brew installed protobuf on macOS set(BUNDLED_Protobuf_LIBRARIES) set(BUNDLED_Protobuf_LIBRARIES_DLC) # generated by delocate-wheel, see also: https://github.com/matthew-brett/delocate/issues/150 if (APPLE) diff --git a/coordinator/gscoordinator/utils.py b/coordinator/gscoordinator/utils.py index f39d99f2b036..bc20f008c719 100644 --- a/coordinator/gscoordinator/utils.py +++ b/coordinator/gscoordinator/utils.py @@ -518,7 +518,7 @@ def compile_app( java_codegen_out_dir = os.path.join( workspace, f"{JAVA_CODEGEN_OUTPUT_PREFIX}-{library_name}" ) - # TODO(zhanglei): Could this codegen caching happends on engine side? + # TODO(zhanglei): Could this codegen caching happens on engine side? if os.path.isdir(java_codegen_out_dir): logger.info( "Found existing java codegen directory: %s, skipped codegen", @@ -896,7 +896,7 @@ def _pre_process_for_run_app_op(op, op_result_pool, key_to_op, **kwargs): parent_op.attr[types_pb2.E_DATA_TYPE].s.decode("utf-8", errors="ignore"), ) - # for giraph app, we need to add args into orginal query_args, which is a json string + # for giraph app, we need to add args into original query_args, which is a json string # first one should be user params, second should be lib_path if app_type.startswith("giraph:"): user_params["app_class"] = GIRAPH_DRIVER_CLASS diff --git a/docs/Doxyfile b/docs/Doxyfile index 837a3fa96408..1d4b16b81be8 100644 --- a/docs/Doxyfile +++ b/docs/Doxyfile @@ -222,7 +222,7 @@ QT_AUTOBRIEF = NO # tag to YES if you prefer the old behavior instead. # # Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. 
+# not recognized anymore. # The default value is: NO. MULTILINE_CPP_IS_BRIEF = NO @@ -552,7 +552,7 @@ INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file # names in lower-case letters. If set to YES, upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows +# in case and if your file system supports case-sensitive file names. Windows # (including Cygwin) ands Mac users are advised to set this option to NO. # The default value is: system dependent. @@ -717,7 +717,7 @@ SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the +# popen()) the command input-file, where command is the value of the # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided # by doxygen. Whatever the program writes to standard output is used as the file # version. For an example see the documentation. @@ -1233,7 +1233,7 @@ HTML_EXTRA_FILES = HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A +# in the HTML output. For a value of 0 the output will use grayscale only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. 
diff --git a/flex/bin/interactive_server.cc b/flex/bin/interactive_server.cc index 38a17650ccd3..eee30c6238d4 100644 --- a/flex/bin/interactive_server.cc +++ b/flex/bin/interactive_server.cc @@ -266,7 +266,7 @@ int main(int argc, char** argv) { auto schema = gs::Schema::LoadFromYaml(graph_schema_path); - // Ths schema is loaded just to get the plugin dir and plugin list + // The schema is loaded just to get the plugin dir and plugin list gs::init_codegen_proxy(vm, graph_schema_path, engine_config_file); db.Close(); auto load_res = db.Open(schema, data_path, shard_num); diff --git a/flex/bin/load_plan_and_gen.sh b/flex/bin/load_plan_and_gen.sh index a59ef3e6debe..fe02ce4938d6 100755 --- a/flex/bin/load_plan_and_gen.sh +++ b/flex/bin/load_plan_and_gen.sh @@ -133,7 +133,7 @@ cypher_to_plan() { err "Compiler jar = ${COMPILER_JAR} not exists." exit 1 fi - # add extrac_key_value_config + # add extra_key_value_config extra_config="name:${procedure_name}" extra_config="${extra_config},description:${procedure_description}" @@ -197,7 +197,7 @@ compile_hqps_so() { last_file_name=$(basename ${input_path}) - # requiest last_file_name suffix is .pb + # request last_file_name suffix is .pb if [[ $last_file_name == *.pb ]]; then query_name="${last_file_name%.pb}" elif [[ $last_file_name == *.cc ]]; then @@ -358,7 +358,7 @@ compile_pegasus_so() { last_file_name=$(basename ${input_path}) info "last file name: ${last_file_name}" - # requiest last_file_name suffix is .pb + # request last_file_name suffix is .pb if [[ $last_file_name == *.pb ]]; then query_name="${last_file_name%.pb}" info "File has .pb suffix." 
diff --git a/flex/cmake/FindArrow.cmake b/flex/cmake/FindArrow.cmake index b26ca9adf752..abcf4a239bf0 100644 --- a/flex/cmake/FindArrow.cmake +++ b/flex/cmake/FindArrow.cmake @@ -1,4 +1,4 @@ -# The file cmake/FindArrow.cmake is referered from project +# The file cmake/FindArrow.cmake is referred from project # https://github.com/apache/arrow # # https://github.com/apache/arrow/blob/master/cpp/cmake_modules/FindArrow.cmake diff --git a/flex/codegen/src/building_context.h b/flex/codegen/src/building_context.h index a63c085bdaaf..ee25d4bd5e1b 100644 --- a/flex/codegen/src/building_context.h +++ b/flex/codegen/src/building_context.h @@ -319,7 +319,7 @@ class BuildingContext { // for input tag_id, // return -1 if tag_id == -1 - // return new asigned tag_ind if it doesn't appears before; + // return new assigned tag_ind if it doesn't appears before; // return the found tag_ind if it appears before; int32_t CreateOrGetTagInd(int tag_id) { return tag_ind_mapping_.CreateOrGetTagInd(tag_id); diff --git a/flex/codegen/src/codegen_utils.h b/flex/codegen/src/codegen_utils.h index 9d361926df2b..5168789fe4c0 100644 --- a/flex/codegen/src/codegen_utils.h +++ b/flex/codegen/src/codegen_utils.h @@ -29,7 +29,7 @@ limitations under the License. namespace gs { -// remote deuplicate from vector +// remote duplicate from vector template std::vector remove_duplicate(const std::vector& labels) { std::vector res; diff --git a/flex/codegen/src/hqps/hqps_case_when_builder.h b/flex/codegen/src/hqps/hqps_case_when_builder.h index 370bc79cce91..4c97c288930f 100644 --- a/flex/codegen/src/hqps/hqps_case_when_builder.h +++ b/flex/codegen/src/hqps/hqps_case_when_builder.h @@ -54,7 +54,7 @@ class CaseWhenBuilder : public ExprBuilder { when_expr) { VLOG(10) << "Got when then exprs of size: " << when_expr.size(); - // Basiclly, each when_then is a if then. + // Basically, each when_then is a if then. 
for (auto& when_then_expr : when_expr) { auto& when_val = when_then_expr.when_expression(); auto& the_result_expr = when_then_expr.then_result_expression(); @@ -105,7 +105,7 @@ class CaseWhenBuilder : public ExprBuilder { func_call_template_typename_str = get_func_call_typename_str(); func_call_params_str = get_func_call_params_str(); - // the func_call impl is overrided + // the func_call impl is overridden func_call_impl_str = get_func_call_impl_str(); private_filed_str = get_private_filed_str(); diff --git a/flex/codegen/src/hqps/hqps_edge_expand_builder.h b/flex/codegen/src/hqps/hqps_edge_expand_builder.h index cdff1fa337ef..d2629b15bb03 100644 --- a/flex/codegen/src/hqps/hqps_edge_expand_builder.h +++ b/flex/codegen/src/hqps/hqps_edge_expand_builder.h @@ -42,7 +42,7 @@ static constexpr const char* EDGE_EXPAND_E_OPT_MULTI_EDGE_NO_FILTER_TEMPLATE_STR = "auto %1% = gs::make_edge_expand_multie_opt<%2%>(%3%, %4%, %5%);\n"; -// This opt can only be used by both edge expandv, with multiplet edge triplet, +// This opt can only be used by both edge expandv, with multiple edge triplet, static constexpr const char* EDGE_EXPAND_V_OPT_MULTI_EDGE_NO_FILTER_TEMPLATE_STR = "auto %1% = gs::make_edge_expand_multiv_opt(%2%, %3%);\n"; diff --git a/flex/codegen/src/hqps/hqps_expr_builder.h b/flex/codegen/src/hqps/hqps_expr_builder.h index 892a72d9f5b1..7d21a1ef6933 100644 --- a/flex/codegen/src/hqps/hqps_expr_builder.h +++ b/flex/codegen/src/hqps/hqps_expr_builder.h @@ -245,7 +245,7 @@ static common::DataType eval_expr_return_type(const common::Expression& expr) { return tmp_stack.top().node_type().data_type(); } -// Simlutate the calculation of expression, return the result data type. +// Simulate the calculation of expression, return the result data type. // convert to prefix expression /*Build a expression struct from expression*/ @@ -408,7 +408,7 @@ class ExprBuilder { } } - // Add extract operator with var. 
Currently not support extract on a compicate + // Add extract operator with var. Currently not support extract on a complicated // expression. void AddExtractOpr(const common::Extract& extract_opr, const common::ExprOpr& expr_opr) { diff --git a/flex/codegen/src/hqps/hqps_path_expand_builder.h b/flex/codegen/src/hqps/hqps_path_expand_builder.h index 87a9fc50cdf7..95fdd4b856c4 100644 --- a/flex/codegen/src/hqps/hqps_path_expand_builder.h +++ b/flex/codegen/src/hqps/hqps_path_expand_builder.h @@ -369,8 +369,8 @@ class PathExpandOpBuilder { // get_v_opt // path_expand_opt // op_code. -// NOTE: we currenly only support path expand v, the in_tag can be fetch fromn -// path_expand_pb itself, while the res_alilas shall be fetch from the later +// NOTE: we currently only support path expand v, the in_tag can be fetch from +// path_expand_pb itself, while the res_alias shall be fetch from the later // get_v template static std::string BuildPathExpandVOp( diff --git a/flex/codegen/src/hqps/hqps_project_builder.h b/flex/codegen/src/hqps/hqps_project_builder.h index 02e2c58ee48c..18e82454f355 100644 --- a/flex/codegen/src/hqps/hqps_project_builder.h +++ b/flex/codegen/src/hqps/hqps_project_builder.h @@ -297,7 +297,7 @@ std::string project_mapping_to_string( BuildingContext& ctx, const physical::Project::ExprAlias& mapping, TagIndMapping& new_tag_ind_map) { int32_t res_alias = mapping.alias().value(); - // TODO: Currenly we assume each expr_alias contains only property for that + // TODO: Currently we assume each expr_alias contains only property for that // input tag auto real_res_alias = new_tag_ind_map.CreateOrGetTagInd(res_alias); diff --git a/flex/codegen/src/hqps/hqps_scan_builder.h b/flex/codegen/src/hqps/hqps_scan_builder.h index fa7ea9eb5c61..b4c750ec34d3 100644 --- a/flex/codegen/src/hqps/hqps_scan_builder.h +++ b/flex/codegen/src/hqps/hqps_scan_builder.h @@ -108,7 +108,7 @@ class ScanOpBuilder { // labels. 
std::vector expr_label_ids; if (try_to_get_label_ids_from_expr(predicate, expr_label_ids)) { - // join expr_label_ids with table_lable_ids; + // join expr_label_ids with table_label_ids; VLOG(10) << "Found label ids in expr: " << gs::to_string(expr_label_ids); intersection(labels_ids_, expr_label_ids); diff --git a/flex/codegen/src/hqps_generator.h b/flex/codegen/src/hqps_generator.h index 347ebf90af95..c2bfa55e3023 100644 --- a/flex/codegen/src/hqps_generator.h +++ b/flex/codegen/src/hqps_generator.h @@ -181,7 +181,7 @@ class QueryGenerator { : ctx_(ctx), plan_(plan) {} std::string GenerateQuery() { - // During generate query body, we will track the parameteres + // During generate query body, we will track the parameters // And also generate the expression for needed std::string query_code = build_query_code(); std::string expr_code; @@ -320,7 +320,7 @@ class QueryGenerator { case physical::PhysicalOpr::Operator::kEdge: { // edge expand physical::EdgeExpand real_edge_expand = opr.edge(); - // try to use infomation from later operator + // try to use information from later operator std::vector dst_vertex_labels; if (i + 1 < size) { auto& get_v_op_opr = plan_.plan(i + 1).opr(); @@ -409,7 +409,7 @@ class QueryGenerator { case physical::PhysicalOpr::Operator::kGroupBy: { // auto& meta_data = meta_datas[0]; - // meta_data is currenly not used in groupby. + // meta_data is currently not used in groupby. physical::PhysicalOpr::MetaData meta_data; auto& group_by_op = opr.group_by(); if (group_by_op.mappings_size() > 0) { @@ -538,7 +538,7 @@ class QueryGenerator { }; // When building a join op, we need to consider the following cases: -// 0. tag_id to tag_ind mapping, two plan shoud keep different mappings +// 0. 
tag_id to tag_ind mapping, two plan should keep different mappings // const physical::PhysicalOpr::MetaData& meta_data template static std::array BuildJoinOp( @@ -678,7 +678,7 @@ static std::string BuildApplyOp( auto new_building_ctx = ctx.CreateSubTaskContext(); auto sub_task_generator = QueryGenerator(new_building_ctx, sub_plan); - // QueryGenrator sub_task_generator(new_building_ctx, sub_plan_); + // QueryGenerator sub_task_generator(new_building_ctx, sub_plan_); // gen a lambda function. lambda_func_name = ctx.GetNextLambdaFuncName(); std::stringstream inner_ss; diff --git a/flex/codegen/src/pb_parser/ir_data_type_parser.h b/flex/codegen/src/pb_parser/ir_data_type_parser.h index d43d5cdae0c3..4c5bfec43fd1 100644 --- a/flex/codegen/src/pb_parser/ir_data_type_parser.h +++ b/flex/codegen/src/pb_parser/ir_data_type_parser.h @@ -27,7 +27,7 @@ namespace gs { // There can be multiple labels. // for each label, we have multiple properties. -// deuplicate the property name and types if two edge label are same, only +// duplicate the property name and types if two edge label are same, only // differs on src-dst pair static std::pair>, std::vector>> diff --git a/flex/codegen/src/pb_parser/query_params_parser.h b/flex/codegen/src/pb_parser/query_params_parser.h index ae7e7bce2001..ca38e2c9be24 100644 --- a/flex/codegen/src/pb_parser/query_params_parser.h +++ b/flex/codegen/src/pb_parser/query_params_parser.h @@ -167,7 +167,7 @@ bool try_to_get_oid_from_expr(const common::Expression& expression, if (num_oprs == 3) { return try_to_get_oid_from_expr_impl(expression, oid); } - // TODO: current hacks the implementaion. (label within 1) && (id == 8780) + // TODO: current hacks the implementation. 
(label within 1) && (id == 8780) common::Expression new_expr; new_expr.add_operators()->CopyFrom(expression.operators(7)); new_expr.add_operators()->CopyFrom(expression.operators(8)); diff --git a/flex/coordinator/gs_flex_coordinator/core/alert/alert_manager.py b/flex/coordinator/gs_flex_coordinator/core/alert/alert_manager.py index 3f3c80695384..eb13d68cc048 100644 --- a/flex/coordinator/gs_flex_coordinator/core/alert/alert_manager.py +++ b/flex/coordinator/gs_flex_coordinator/core/alert/alert_manager.py @@ -98,7 +98,7 @@ def list_alert_messages( ) -> List[AlertMessage]: enable_filter = True if start_time is None and end_time is not None: - # None -> date, fetch end day's messages, and disable date fileter + # None -> date, fetch end day's messages, and disable date filter enable_filter = False end_date_filter = decode_datetimestr(end_time) start_date_filter = end_date_filter @@ -107,7 +107,7 @@ def list_alert_messages( start_date_filter = decode_datetimestr(start_time) end_date_filter = datetime.datetime.now() elif start_time is None and end_time is None: - # None -> None, fetch today's messages, and disable date fileter + # None -> None, fetch today's messages, and disable date filter enable_filter = False start_date_filter = end_date_filter = datetime.datetime.now() else: diff --git a/flex/engines/hqps_db/core/base_engine.h b/flex/engines/hqps_db/core/base_engine.h index ba01e925c482..3dce30e77540 100644 --- a/flex/engines/hqps_db/core/base_engine.h +++ b/flex/engines/hqps_db/core/base_engine.h @@ -160,7 +160,7 @@ class BaseEngine { copied_ctx.set_sub_task_start_tag(start_tag); auto inner_ctx = func(std::move(copied_ctx)); - // We shall obtain the active indcies in res_ctx via csr offset + // We shall obtain the active indices in res_ctx via csr offset // arrays. 
std::vector tmp_vec = inner_ctx.ObtainOffsetFromTag(start_tag); @@ -521,7 +521,7 @@ class BaseEngine { // InnerJoin // for example, join (a,b,c) with (b,c,d) we got (a,b,c,d); // prob: the mapping of tag_id to tag_inds may change. - // prob: builing new columns. + // prob: building new columns. template ::type* = @@ -877,7 +877,7 @@ class BaseEngine { static auto Intersect(CTX_X&& ctx_x, CTX_Y&& ctx_y) { using ctx_x_iter_t = typename CTX_X::iterator; using ctx_y_iter_t = typename CTX_Y::iterator; - // the prev column (the last column in prev_tuple shoud be the same.) + // the prev column (the last column in prev_tuple should be the same.) using ctx_x_all_ele_t = std::remove_reference_t().GetAllElement())>; using ctx_y_all_ele_t = std::remove_reference_t> { * @brief A data structure holding all the data we have in query. * * @tparam HEAD_T The current head node. - * @tparam base_tag The base tag based on which the tag id increases. Defaultly + * @tparam base_tag The base tag based on which the tag id increases. Default * 0, set to non-zero for grouped sets. * @tparam cur_alias To which col_id it is aliased. * @tparam ALIAS_COL The saved obj in query up till now. @@ -873,7 +873,7 @@ class Context { << std::to_string(deduped_tag); } - // This dedup doesn't clear deplication in indivdual set.!!!!! + // This dedup doesn't clear duplication in individual set.!!!!! // start from alias_to_use, simplify all later csr. // no meaning to dedup with tag == 0; template diff --git a/flex/engines/hqps_db/core/operator/edge_expand.h b/flex/engines/hqps_db/core/operator/edge_expand.h index f39b5ff75317..0da36d3fa70d 100644 --- a/flex/engines/hqps_db/core/operator/edge_expand.h +++ b/flex/engines/hqps_db/core/operator/edge_expand.h @@ -664,7 +664,7 @@ class EdgeExpand { for (size_t i = 0; i < edge_labels.size(); ++i) { // Check whether the edge triplet match input vertices. 
- // return a hanlder to get edges + // return a handler to get edges std::vector cur_src_vids; std::vector cur_active_inds; if (direction == Direction::Out || direction == Direction::Both) { @@ -784,7 +784,7 @@ class EdgeExpand { auto prop_names_vec = prop_names_to_vec(prop_names); for (size_t i = 0; i < edge_labels.size(); ++i) { // Check whether the edge triplet match input vertices. - // return a hanlder to get edges + // return a handler to get edges auto sub_graph_vec = graph.GetSubGraph( edge_labels[i][0], edge_labels[i][1], edge_labels[i][2], gs::to_string(direction), prop_names_vec[i]); @@ -901,7 +901,7 @@ class EdgeExpand { auto prop_names_vec = prop_names_to_vec(prop_names); for (size_t i = 0; i < edge_labels.size(); ++i) { // Check whether the edge triplet match input vertices. - // return a hanlder to get edges + // return a handler to get edges auto sub_graph_vec = graph.GetSubGraph( edge_labels[i][0], edge_labels[i][1], edge_labels[i][2], gs::to_string(direction), prop_names_vec[i]); diff --git a/flex/engines/hqps_db/core/operator/group_by.h b/flex/engines/hqps_db/core/operator/group_by.h index c653c453670f..0642a71bc29f 100644 --- a/flex/engines/hqps_db/core/operator/group_by.h +++ b/flex/engines/hqps_db/core/operator/group_by.h @@ -28,7 +28,7 @@ limitations under the License. namespace gs { -// For each aggreator, return the type of applying aggregate on the desired col. +// For each aggregator, return the type of applying aggregate on the desired col. // with possible aggregate func. 
template @@ -220,7 +220,7 @@ struct Rearrange { typename UnWrapTuple::context_t; }; -// only two nodees +// only two nodes // template // struct Rearrange { // using context_t = Context; diff --git a/flex/engines/hqps_db/core/operator/path_expand.h b/flex/engines/hqps_db/core/operator/path_expand.h index d724ea0cf3a3..000309c22701 100644 --- a/flex/engines/hqps_db/core/operator/path_expand.h +++ b/flex/engines/hqps_db/core/operator/path_expand.h @@ -37,7 +37,7 @@ namespace gs { * * Currently we only support path expand with only one edge label and only one *dst label. - * The input vertex set must be of one labe. + * The input vertex set must be of one label. **/ template @@ -169,7 +169,7 @@ class PathExpand { } // Path expand to vertices with columns. - // PathExpand to vertices with vertex properties also retreived + // PathExpand to vertices with vertex properties also retrieved template 0)>::type* = nullptr, @@ -519,7 +519,7 @@ class PathExpand { } LOG(INFO) << "visit array time: " << visit_array_time << ", gid size: " << gids.size(); - // select vetices that are in range. + // select vertices that are in range. offsets.emplace_back(0); offsets.emplace_back(gids.size()); @@ -583,7 +583,7 @@ class PathExpand { visit_array_time += t0; } LOG(INFO) << "visit array time: " << visit_array_time; - // select vetices that are in range. + // select vertices that are in range. 
std::vector flat_gids; std::vector flat_offsets; std::vector dists; diff --git a/flex/engines/hqps_db/core/operator/sink.h b/flex/engines/hqps_db/core/operator/sink.h index 1662c251d94e..5e7b3849a68c 100644 --- a/flex/engines/hqps_db/core/operator/sink.h +++ b/flex/engines/hqps_db/core/operator/sink.h @@ -608,7 +608,7 @@ class SinkOp { } } - // sinke for tuple with one element + // sink for tuple with one element template ::value) && (gs::is_tuple::value) && diff --git a/flex/engines/hqps_db/core/params.h b/flex/engines/hqps_db/core/params.h index c2f9d58d574b..09a917349745 100644 --- a/flex/engines/hqps_db/core/params.h +++ b/flex/engines/hqps_db/core/params.h @@ -628,7 +628,7 @@ struct EdgeExpandOptMultiLabel { Direction direction_; LabelT edge_label_; - // edge filter func can be apply to every label vertcies + // edge filter func can be apply to every label vertices std::array edge_filter_; std::array other_labels_; // There might be multiple dst labels. @@ -662,7 +662,7 @@ auto make_edge_expandv_opt(Direction dir, LabelT edge_label, LabelT other_label, return EdgeExpandOpt(dir, edge_label, other_label, std::move(func)); } -// Template can not have to variadic template parameters. +// Template cannot have to variadic template parameters. // so we make filter_t as a tuple. template struct GetVOpt; @@ -952,7 +952,7 @@ struct AliasTagProp { : tag_prop_{std::move(prop_names)} {} }; -// Alias the property of multiple tags' multiple propty. +// Alias the property of multiple tags' multiple property. // For the grouping key, use which property, and alias to what. template @@ -966,7 +966,7 @@ struct ProjectSelf { static constexpr int res_alias = _res_alias; }; -// evalutate expression on previous context. +// evaluate expression on previous context. 
template struct ProjectExpr { static constexpr int res_alias = _res_alias; diff --git a/flex/engines/hqps_db/core/sync_engine.h b/flex/engines/hqps_db/core/sync_engine.h index dbeeae969d03..13eceaa983e4 100644 --- a/flex/engines/hqps_db/core/sync_engine.h +++ b/flex/engines/hqps_db/core/sync_engine.h @@ -318,7 +318,7 @@ class SyncEngine : public BaseEngine { size_t limit = INT_MAX) { // Unwrap params here. auto& select_node = gs::Get(ctx); - // Modifiy offsets. + // Modify offsets. // pass select node by reference. auto pair = EdgeExpand::template EdgeExpandE( graph, select_node, edge_expand_opt.dir_, edge_expand_opt.edge_label_, @@ -327,7 +327,7 @@ class SyncEngine : public BaseEngine { // create new context node, update offsets. return ctx.template AddNode( std::move(pair.first), std::move(pair.second), alias_to_use); - // old context will be abondon here. + // old context will be abandoned here. } /// @brief //////// Edge Expand to Edge, with multiple dst vertex labels. @@ -358,7 +358,7 @@ class SyncEngine : public BaseEngine { size_t limit = INT_MAX) { // Unwrap params here. auto& select_node = gs::Get(ctx); - // Modifiy offsets. + // Modify offsets. // pass select node by reference. auto pair = EdgeExpand::template EdgeExpandE( graph, select_node, edge_expand_opt.dir_, edge_expand_opt.edge_label_, @@ -367,7 +367,7 @@ class SyncEngine : public BaseEngine { // create new context node, update offsets. return ctx.template AddNode( std::move(pair.first), std::move(pair.second), alias_to_use); - // old context will be abondon here. + // old context will be abandoned here. } template (ctx); - // Modifiy offsets. + // Modify offsets. // pass select node by reference. auto pair = EdgeExpand::EdgeExpandV( graph, select_node, edge_expand_opt.direction_, @@ -390,7 +390,7 @@ class SyncEngine : public BaseEngine { // create new context node, update offsets. 
return ctx.template AddNode(std::move(pair.first), std::move(pair.second), alias_to_use); - // old context will be abondon here. + // old context will be abandoned here. } template && edge_expand_opt) { // Unwrap params here. auto& select_node = gs::Get(ctx); - // Modifiy offsets. + // Modify offsets. // pass select node by reference. auto pair = EdgeExpand::EdgeExpandV( graph, select_node, edge_expand_opt.direction_, @@ -411,7 +411,7 @@ class SyncEngine : public BaseEngine { // create new context node, update offsets. return ctx.template AddNode(std::move(pair.first), std::move(pair.second), alias_to_use); - // old context will be abondon here. + // old context will be abandoned here. } //////////////////////////////////////Path Expand///////////////////////// @@ -618,7 +618,7 @@ class SyncEngine : public BaseEngine { //////////////////////////////////////Project///////////////////////// // Project current relations to new columns, append or not. - // TODO: add type infere back: + // TODO: add type inference back: // typename RES_T = typename ProjectResT< // is_append, Context, // PROJECT_OPT>::result_t @@ -807,7 +807,7 @@ class SyncEngine : public BaseEngine { //////////////////////////////////////Select/Filter///////////////////////// // Select with head node. The type doesn't change - // select can possiblely applied on multiple tags + // select can possibly applied on multiple tags // (!CTX_HEAD_T::is_row_vertex_set) && (!CTX_HEAD_T::is_two_label_set) && template < int... in_col_id, typename CTX_HEAD_T, int cur_alias, int base_tag, @@ -873,7 +873,7 @@ class SyncEngine : public BaseEngine { } //////////////////////////////////////Group///////////////////////// - // We currently support group with one key, and possiblely multiple values. + // We currently support group with one key, and possibly multiple values. // create a brand new context type. // group count is included in this implementation. 
template struct ColumnAccessorImpl {}; -// Recurvise +// Recursive template struct ColumnAccessorImpl : public SingleColumn, diff --git a/flex/engines/hqps_db/core/utils/props.h b/flex/engines/hqps_db/core/utils/props.h index 0199bf57e0bc..6e46d0abce3c 100644 --- a/flex/engines/hqps_db/core/utils/props.h +++ b/flex/engines/hqps_db/core/utils/props.h @@ -100,7 +100,7 @@ static auto get_single_prop_getter_from_selector( return graph.template GetSinglePropGetter(label, prop_name); } -// get prop getter from multiplet named property +// get prop getter from multiple named property template static auto get_prop_getters_from_named_property( const GRAPH_INTERFACE& graph, const LabelT& label, diff --git a/flex/engines/hqps_db/database/adj_list.h b/flex/engines/hqps_db/database/adj_list.h index 003495cfc24d..752f45eac12d 100644 --- a/flex/engines/hqps_db/database/adj_list.h +++ b/flex/engines/hqps_db/database/adj_list.h @@ -77,7 +77,7 @@ class EdgeIter { }; // A subGraph is a view of a simple graph, with one src label and one dst label. -// Cound be empty. +// Could be empty. template class SubGraph { public: @@ -324,7 +324,7 @@ class AdjList { // copy constructor AdjList(const AdjList& adj_list) : slice0_(adj_list.slice0_), slice1_(adj_list.slice1_) {} - // with sinle slice provided. + // with single slice provided. AdjList(const slice_t& slice0) : slice0_(slice0), slice1_() {} AdjList(const slice_t& slice0, const slice_t& slice1) : slice0_(slice0), slice1_(slice1) {} diff --git a/flex/engines/hqps_db/structures/collection.h b/flex/engines/hqps_db/structures/collection.h index fa53317fd549..33d80e6d25f2 100644 --- a/flex/engines/hqps_db/structures/collection.h +++ b/flex/engines/hqps_db/structures/collection.h @@ -37,7 +37,7 @@ class EmptyCol { // After operator like group, we need to extract the property or the count to // separate column. -// We use collection to implemention this abstraction. +// We use collection to implement this abstraction. 
// Currently we may not use it like vertex_set/edge_set, i.e., no dedup, no // flat, not subset on collection. @@ -404,7 +404,7 @@ class CountBuilder { } using cur_ele_tuple = typename gs::tuple_element::type; auto& cur_ele = gs::get_from_tuple(tuple); - // currenly we support vertex ele tupe and edge tuple. + // currently we support vertex ele tuple and edge tuple. if constexpr (std::tuple_size::value == 2) { auto& ele = std::get<1>(cur_ele); using vid_t = typename std::tuple_element<1, cur_ele_tuple>::type; @@ -545,7 +545,7 @@ class DistinctCountBuilder< size_t edges_num_; }; -// count the distinct number of recieved elements. +// count the distinct number of received elements. template class DistinctCountBuilder> { public: diff --git a/flex/engines/hqps_db/structures/multi_edge_set/adj_edge_set.h b/flex/engines/hqps_db/structures/multi_edge_set/adj_edge_set.h index 1b815c47ff0e..69d7fe42c6c0 100644 --- a/flex/engines/hqps_db/structures/multi_edge_set/adj_edge_set.h +++ b/flex/engines/hqps_db/structures/multi_edge_set/adj_edge_set.h @@ -369,7 +369,7 @@ class AdjEdgeSet { LOG(WARNING) << "No implemented"; } - // fill builtin props withour repeat array. + // fill builtin props without repeat array. template void fillBuiltinProps(std::vector>& tuples, PropNameArray& prop_names) { @@ -487,7 +487,7 @@ class AdjEdgeSet { LOG(WARNING) << "No implemented"; } - // fill builtin props withour repeat array. + // fill builtin props without repeat array. 
template void fillBuiltinProps(std::vector>& tuples, PropNameArray& prop_names) { diff --git a/flex/engines/hqps_db/structures/multi_edge_set/untyped_edge_set.h b/flex/engines/hqps_db/structures/multi_edge_set/untyped_edge_set.h index 36ce6afc2aa2..2267c8b6230c 100644 --- a/flex/engines/hqps_db/structures/multi_edge_set/untyped_edge_set.h +++ b/flex/engines/hqps_db/structures/multi_edge_set/untyped_edge_set.h @@ -189,7 +189,7 @@ class UnTypedEdgeSet { } iterator begin() const { - // generate a vector of vistable edge iterators. + // generate a vector of visitable edge iterators. auto tmp = generate_iters(); return iterator(src_vertices_, std::move(tmp), 0); } diff --git a/flex/engines/hqps_db/structures/multi_vertex_set/multi_label_vertex_set.h b/flex/engines/hqps_db/structures/multi_vertex_set/multi_label_vertex_set.h index 7ff3fc0329cb..a25dcaf4b100 100644 --- a/flex/engines/hqps_db/structures/multi_vertex_set/multi_label_vertex_set.h +++ b/flex/engines/hqps_db/structures/multi_vertex_set/multi_label_vertex_set.h @@ -325,7 +325,7 @@ class MultiLabelVertexSet { return std::move(*this); } - // Filter vertex sets with expresion and labels. + // Filter vertex sets with expression and labels. template , diff --git a/flex/engines/hqps_db/structures/multi_vertex_set/row_vertex_set.h b/flex/engines/hqps_db/structures/multi_vertex_set/row_vertex_set.h index c06373f45843..9f834ae69106 100644 --- a/flex/engines/hqps_db/structures/multi_vertex_set/row_vertex_set.h +++ b/flex/engines/hqps_db/structures/multi_vertex_set/row_vertex_set.h @@ -1139,7 +1139,7 @@ class RowVertexSetImpl { repeat_array); } - // fill builtin props withour repeat array. + // fill builtin props without repeat array. template void fillBuiltinProps(std::vector>& tuples, const PropNameArray& prop_names) { @@ -1427,14 +1427,14 @@ class RowVertexSetImpl { std::move(new_datas)); } - // Removed_indices is not repest to current set's indices. + // Removed_indices is not with respect to current set's indices. 
// It refer to the indices_range's index. // removed = [1] // indices_range = [0, 3, 5, 8] // Then we should remove eles in [3,5) // indices became // [0, 3, 6], - // num _elemenst 8 -> 6 + // num _elements 8 -> 6 // return the new offset range std::vector SubSetWithRemovedIndices( std::vector& removed_indices, diff --git a/flex/engines/http_server/actor/admin_actor.act.cc b/flex/engines/http_server/actor/admin_actor.act.cc index aecda0d147ac..3985e7ac30f7 100644 --- a/flex/engines/http_server/actor/admin_actor.act.cc +++ b/flex/engines/http_server/actor/admin_actor.act.cc @@ -130,7 +130,7 @@ seastar::future admin_actor::run_delete_graph( // load the graph. seastar::future admin_actor::run_graph_loading( graph_management_param&& query_param) { - // query_param constains two parameter, first for graph name, second for graph + // query_param contains two parameters, first for graph name, second for graph // config auto content = query_param.content; auto& graph_name = content.first; @@ -278,7 +278,7 @@ seastar::future admin_actor::update_procedure( } // Start service on a graph first means stop all current running actors, then -// switch graph and and create new actors with a unused scope_id. +// switch graph and create new actors with an unused scope_id. seastar::future admin_actor::start_service( query_param&& query_param) { // parse query_param.content as json and get graph_name diff --git a/flex/engines/http_server/actor/admin_actor.act.h b/flex/engines/http_server/actor/admin_actor.act.h index 8c70ee528cb2..556c84936e22 100644 --- a/flex/engines/http_server/actor/admin_actor.act.h +++ b/flex/engines/http_server/actor/admin_actor.act.h @@ -57,7 +57,7 @@ class ANNOTATION(actor:impl) admin_actor : public hiactor::actor { seastar::future ANNOTATION(actor:method) node_status(query_param&& param); - // DECLARE_RUN_QUERYS; + // DECLARE_RUN_QUERIES; /// Declare `do_work` func here, no need to implement.
ACTOR_DO_WORK() diff --git a/flex/engines/http_server/actor/codegen_actor.act.cc b/flex/engines/http_server/actor/codegen_actor.act.cc index 6e1be0151859..8d0291258012 100644 --- a/flex/engines/http_server/actor/codegen_actor.act.cc +++ b/flex/engines/http_server/actor/codegen_actor.act.cc @@ -39,7 +39,7 @@ codegen_actor::codegen_actor(hiactor::actor_base* exec_ctx, seastar::future codegen_actor::do_codegen(query_param&& param) { LOG(INFO) << "Running codegen for " << param.content.size(); - // The received query's pay load shoud be able to deserialze to physical plan + // The received query's payload should be able to deserialize to physical plan auto& str = param.content; if (str.size() <= 0) { LOG(INFO) << "Empty query"; diff --git a/flex/engines/http_server/actor/codegen_actor.act.h b/flex/engines/http_server/actor/codegen_actor.act.h index 597b40167082..6141ead6b1a5 100644 --- a/flex/engines/http_server/actor/codegen_actor.act.h +++ b/flex/engines/http_server/actor/codegen_actor.act.h @@ -30,7 +30,7 @@ class ANNOTATION(actor:impl) codegen_actor : public hiactor::actor { seastar::future ANNOTATION(actor:method) do_codegen(query_param&& param); - // DECLARE_RUN_QUERYS; + // DECLARE_RUN_QUERIES; /// Declare `do_work` func here, no need to implement. ACTOR_DO_WORK() diff --git a/flex/engines/http_server/actor/executor.act.h b/flex/engines/http_server/actor/executor.act.h index 63f03f518f7e..cc8145011519 100644 --- a/flex/engines/http_server/actor/executor.act.h +++ b/flex/engines/http_server/actor/executor.act.h @@ -31,7 +31,7 @@ class ANNOTATION(actor:impl) executor : public hiactor::actor { seastar::future ANNOTATION(actor:method) run_graph_db_query(query_param&& param); - // DECLARE_RUN_QUERYS; + // DECLARE_RUN_QUERIES; /// Declare `do_work` func here, no need to implement.
ACTOR_DO_WORK() diff --git a/flex/engines/http_server/handler/hqps_http_handler.cc b/flex/engines/http_server/handler/hqps_http_handler.cc index 5b57cc5bb384..8cef85c9e0ca 100644 --- a/flex/engines/http_server/handler/hqps_http_handler.cc +++ b/flex/engines/http_server/handler/hqps_http_handler.cc @@ -119,7 +119,7 @@ seastar::future> hqps_ic_handler::handle( }); } -// a handler for handl adhoc query. +// a handler to handle adhoc query. hqps_adhoc_query_handler::hqps_adhoc_query_handler( uint32_t init_adhoc_group_id, uint32_t init_codegen_group_id, diff --git a/flex/interactive/bin/gs_interactive b/flex/interactive/bin/gs_interactive index 8438d43efec0..96bcf1bbaa97 100755 --- a/flex/interactive/bin/gs_interactive +++ b/flex/interactive/bin/gs_interactive @@ -41,7 +41,7 @@ emph(){ # source: https://github.com/mrbaseman/parse_yaml.git # ############################################################################### -# Parses a YAML file and outputs variable assigments. Can optionally accept a +# Parses a YAML file and outputs variable assignments. 
Can optionally accept a # variable name prefix and a variable name separator # # Usage: @@ -241,7 +241,7 @@ DOCKER_DB_COMPILER_BIN="com.alibaba.graphscope.GraphServer" DOCKER_DB_GEN_BIN="${DOCKER_DB_GRAPHSCOPE_HOME}/bin/load_plan_and_gen.sh" HOST_DB_TMP_DIR="/tmp/" -#################### DEFINE DEFAULT CONSTATNS #################### +#################### DEFINE DEFAULT CONSTANTS #################### DATABASE_VERSION="v0.0.3" DATABASE_DEFAULT_GRAPH_NAME="gs_interactive_default_graph" DATABASE_CURRENT_GRAPH_NAME=${DATABASE_DEFAULT_GRAPH_NAME} @@ -638,7 +638,7 @@ function compile_usage(){ EOF } -## .enable and .disable file contols the stored procedure enable/disable +## .enable and .disable file controls the stored procedure enable/disable function enable_proc_usage(){ cat << EOF @@ -855,7 +855,7 @@ function do_init(){ # check running containers and exit check_running_containers_and_exit info "Ok, no running instance found, start init database..." - # if no containers running, procede to init + # if no containers running, proceed to init # check args num 1, and get the first args as CONFIG_FILE if [ $# -eq 0 ]; then @@ -1038,7 +1038,7 @@ function do_create(){ fi check_file_exists "${schema_file}" amplify_schema_file="${HOST_DB_TMP_DIR}/graph0.yaml" - # add some default settings and non-user-awared settings to schema file. + # add some default settings and non-user-aware settings to schema file. amplify_graph_schema ${schema_file} ${amplify_schema_file} # check graph is running inside docker check_graph_not_running ${graph_name} || (err "Can not create graph ${graph_name}, since a graph with same nameing running." && exit 1) @@ -1403,7 +1403,7 @@ function do_start(){ # regenerate graph.yaml from graph0.yaml and override graph.yaml with stored procedure enable and disable update_graph_yaml_with_procedure_enabling ${graph_name} - # the bulk_load_file shoud place inside ${DATABASE_WORKSPACE}. 
and should use relative path + # the bulk_load_file should be placed inside ${DATABASE_WORKSPACE}. and should use relative path if [ -f "${HOST_DB_RUNNING_FILE}" ]; then . ${HOST_DB_ENV_FILE} fi @@ -1587,7 +1587,7 @@ function do_log(){ } # the compiled dynamic libs will be placed at data/${graph_name}/plugins/ -# after compilation, the user need to write the cooresponding yaml, telling the compiler about +# after compilation, the user needs to write the corresponding yaml, telling the compiler about # the input and output of the stored procedure function do_compile() { ensure_container_running diff --git a/flex/resources/hqps/CMakeLists.txt.template b/flex/resources/hqps/CMakeLists.txt.template index 83799ed603b3..f8c0d8445445 100644 --- a/flex/resources/hqps/CMakeLists.txt.template +++ b/flex/resources/hqps/CMakeLists.txt.template @@ -23,7 +23,7 @@ else() message(FATAL_ERROR "FLEX_INCLUDE_PREFIX not set") endif() -# try to find mpi, if not found warnning +# try to find mpi, if not found warning find_package(MPI) if (MPI_FOUND) include_directories(SYSTEM ${MPI_CXX_INCLUDE_PATH}) diff --git a/flex/storages/rt_mutable_graph/loader/abstract_arrow_fragment_loader.h b/flex/storages/rt_mutable_graph/loader/abstract_arrow_fragment_loader.h index f1ef384a6939..ddb1e7f5c91c 100644 --- a/flex/storages/rt_mutable_graph/loader/abstract_arrow_fragment_loader.h +++ b/flex/storages/rt_mutable_graph/loader/abstract_arrow_fragment_loader.h @@ -258,7 +258,7 @@ static void append_edges( } // A AbstractArrowFragmentLoader with can load fragment from arrow::table. -// Can not be used directly, should be inherited. +// Cannot be used directly, should be inherited.
class AbstractArrowFragmentLoader : public IFragmentLoader { public: AbstractArrowFragmentLoader(const std::string& work_dir, const Schema& schema, diff --git a/flex/storages/rt_mutable_graph/loader/loader_factory.h b/flex/storages/rt_mutable_graph/loader/loader_factory.h index 8fb24b79e3ae..fb71435deeb6 100644 --- a/flex/storages/rt_mutable_graph/loader/loader_factory.h +++ b/flex/storages/rt_mutable_graph/loader/loader_factory.h @@ -24,7 +24,7 @@ namespace gs { /** * @brief LoaderFactory is a factory class to create IFragmentLoader. - * Support Using dynamicly built library as plugin. + * Support Using dynamically built library as plugin. */ class LoaderFactory { public: diff --git a/flex/storages/rt_mutable_graph/schema.cc b/flex/storages/rt_mutable_graph/schema.cc index a1b8deb59514..e97c1c59589d 100644 --- a/flex/storages/rt_mutable_graph/schema.cc +++ b/flex/storages/rt_mutable_graph/schema.cc @@ -623,7 +623,7 @@ static bool parse_vertex_schema(YAML::Node node, Schema& schema) { if (!get_scalar(node, "type_name", label_name)) { return false; } - // Can not add two vertex label with same name + // Cannot add two vertex label with same name if (schema.has_vertex_label(label_name)) { LOG(ERROR) << "Vertex label " << label_name << " already exists"; return false; diff --git a/flex/third_party/httplib.h b/flex/third_party/httplib.h index 12c7b85d70f1..709ac4cbe5e0 100644 --- a/flex/third_party/httplib.h +++ b/flex/third_party/httplib.h @@ -1101,7 +1101,7 @@ class ClientImpl { void copy_settings(const ClientImpl &rhs); - // Socket endoint information + // Socket endpoint information const std::string host_; const int port_; const std::string host_and_port_; @@ -5819,7 +5819,7 @@ Server::process_request(Stream &strm, bool close_connection, } } - // Rounting + // Routing bool routed = false; #ifdef CPPHTTPLIB_NO_EXCEPTIONS routed = routing(req, res, strm); @@ -6258,7 +6258,7 @@ inline bool ClientImpl::write_content_with_provider(Stream &strm, auto is_shutting_down = 
[]() { return false; }; if (req.is_chunked_content_provider_) { - // TODO: Brotli suport + // TODO: Brotli support std::unique_ptr compressor; #ifdef CPPHTTPLIB_ZLIB_SUPPORT if (compress_) { @@ -7062,7 +7062,7 @@ inline void ClientImpl::stop() { return; } - // Otherwise, sitll holding the mutex, we can shut everything down ourselves + // Otherwise, still holding the mutex, we can shut everything down ourselves shutdown_ssl(socket_, true); shutdown_socket(socket_); close_socket(socket_); diff --git a/flex/third_party/odps/include/common/configuration.h b/flex/third_party/odps/include/common/configuration.h index a229f3960a0d..4bed727d694d 100644 --- a/flex/third_party/odps/include/common/configuration.h +++ b/flex/third_party/odps/include/common/configuration.h @@ -159,7 +159,7 @@ class Configuration { static const int DEFAULT_SOCKET_TIMEOUT = 300; /**< Socket超时默认时间,单位是秒*/ - Account account; /**< Acount类*/ + Account account; /**< Account类*/ AppAccount appAccount; /**< AppAccount类,用于双签名*/ StsToken stsToken; diff --git a/flex/third_party/odps/include/common/exception.h b/flex/third_party/odps/include/common/exception.h index 66e229e06c51..c87f73393316 100644 --- a/flex/third_party/odps/include/common/exception.h +++ b/flex/third_party/odps/include/common/exception.h @@ -3,7 +3,7 @@ */ /** - * Guildelines for exception handling + * Guidelines for exception handling * 1. How to throw * APSARA_THROW(the_exception_to_show, description_string) * e.g. APSARA_THROW(ParameterInvalidException, "Cannot be empty string"); @@ -20,7 +20,7 @@ * details. By default, it returns the description string when you construct the * exception. GetStackTrace(): call stack when the exception is thrown. 
* ToString(): return a string describing: - * a) where the exception is thown (if available) + * a) where the exception is thrown (if available) * b) the exception class name * c) the content of GetMessage * d) stack trace diff --git a/flex/third_party/odps/include/common/md5.h b/flex/third_party/odps/include/common/md5.h index 8b7614010638..80b52b57945a 100644 --- a/flex/third_party/odps/include/common/md5.h +++ b/flex/third_party/odps/include/common/md5.h @@ -109,7 +109,7 @@ class Md5Stream { */ void Initailize(); - bool mLittleEndian; /// true if litte endian, false if big endian + bool mLittleEndian; /// true if little endian, false if big endian uint8_t mBuf[64]; /// hold remained input stream uint64_t mBufPosition; /// indicate the position of stream end in buf uint32_t mH[4]; /// hold inter result diff --git a/flex/third_party/odps/include/storage_api.hpp b/flex/third_party/odps/include/storage_api.hpp index a6503d56be54..1ba2b763c40d 100644 --- a/flex/third_party/odps/include/storage_api.hpp +++ b/flex/third_party/odps/include/storage_api.hpp @@ -130,7 +130,7 @@ class Client { enum Status { OK = 0, FAIL, - WAIT, // CreateReadSession() and CommitWriteSession() may process the requst + WAIT, // CreateReadSession() and CommitWriteSession() may process the request // asynchronously CANCELED, }; diff --git a/flex/third_party/odps/include/storage_api_arrow.hpp b/flex/third_party/odps/include/storage_api_arrow.hpp index 3b4af37042b4..413f8fe45943 100644 --- a/flex/third_party/odps/include/storage_api_arrow.hpp +++ b/flex/third_party/odps/include/storage_api_arrow.hpp @@ -50,7 +50,7 @@ class ArrowClient { * @brief Read one split of the read session. * * @param request Read rows request parameters. - * @param cache_size Number of not readed record baches cached in the memory. + * @param cache_size Number of not read record batches cached in the memory. * * @return Record batch reader.
*/ @@ -149,7 +149,7 @@ class Reader { friend class ArrowClient; friend class internal::ArrowStreamListener; - // the data are asyn read from the server and cached in the record_batches_ + // the data are async read from the server and cached in the record_batches_ BlockingQueue> record_batches_; ReadRowsResp resp_; @@ -188,7 +188,7 @@ class Writer { * * @return Whether there is error when writing the data. * Note: As the record batch is first cached in the memory and sent - * to server later, return true doesn't mean the data has been transfered to + * to server later, return true doesn't mean the data has been transferred to * the server. */ bool Write(std::shared_ptr record_batch); @@ -422,7 +422,7 @@ inline void Reader::ReadRowsThread(const ReadRowsReq request, throw std::bad_alloc(); } auto buffer = std::move(result).ValueOrDie(); - // we should guarantee the memory is valid during reocrd_batch processing + // we should guarantee the memory is valid during record_batch processing // and so copy the memory here memcpy(buffer->mutable_data(), data, len); auto status = decoder->Consume(std::move(buffer)); diff --git a/flex/utils/property/types.h b/flex/utils/property/types.h index d95fd5043661..d4f32544768d 100644 --- a/flex/utils/property/types.h +++ b/flex/utils/property/types.h @@ -911,7 +911,7 @@ struct AnyConverter { } }; -// specilization for float +// specialization for float template <> struct AnyConverter { static PropertyType type() { return PropertyType::kFloat; } diff --git a/flex/utils/service_utils.cc b/flex/utils/service_utils.cc index f74c23985d9a..9421a7e01d19 100644 --- a/flex/utils/service_utils.cc +++ b/flex/utils/service_utils.cc @@ -59,7 +59,7 @@ std::string find_codegen_bin() { // usr/local/ LOG(INFO) << "infer flex_home as installed, flex_home: " << flex_home_str; - // check codege_bin_path exists + // check codegen_bin path exists codegen_bin = flex_home_str + "/bin/" + CODEGEN_BIN; // if flex_home exists, return flex_home if 
(std::filesystem::exists(codegen_bin)) { diff --git a/interactive_engine/assembly/src/bin/graphscope/giectl b/interactive_engine/assembly/src/bin/graphscope/giectl index 040c7f22cce8..ad8a7fb2fdd8 100755 --- a/interactive_engine/assembly/src/bin/graphscope/giectl +++ b/interactive_engine/assembly/src/bin/graphscope/giectl @@ -235,7 +235,7 @@ create_gremlin_instance_on_local() { # /var/log/graphscope is not existed/writable, switch to ${HOME}/.local/log/graphscope GS_LOG=${HOME}/.local/log/graphscope fi - # init Graphscope log location + # init GraphScope log location readonly GS_LOG mkdir -p ${GS_LOG} diff --git a/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/ir/rex/operator/CaseOperator.java b/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/ir/rex/operator/CaseOperator.java index 03f577140d77..adc1dda255b6 100644 --- a/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/ir/rex/operator/CaseOperator.java +++ b/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/ir/rex/operator/CaseOperator.java @@ -68,7 +68,7 @@ public boolean checkOperandTypes(SqlCallBinding callBinding, boolean throwOnFail } if (!foundNotNull) { - // according to the sql standard we can not have all of the THEN + // according to the sql standard we cannot have all of the THEN // statements and the ELSE returning null if (throwOnFailure && !callBinding.isTypeCoercionEnabled()) { throw callBinding.newValidationError(RESOURCE.mustNotNullInElse()); diff --git a/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/ir/runtime/proto/RexToProtoConverter.java b/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/ir/runtime/proto/RexToProtoConverter.java index b282b265f573..910460583658 100644 --- a/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/ir/runtime/proto/RexToProtoConverter.java +++ 
b/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/ir/runtime/proto/RexToProtoConverter.java @@ -220,7 +220,7 @@ private OuterExpression.Expression visitUnaryOperator( private OuterExpression.Expression visitBinaryOperator(RexCall call) { if (call.getOperator().getKind() == SqlKind.SEARCH) { - // ir core can not support continuous ranges in a search operator, here expand it to + // ir core cannot support continuous ranges in a search operator, here expand it to // compositions of 'and' or 'or', // i.e. a.age SEARCH [[1, 10]] -> a.age >= 1 and a.age <= 10 RexNode left = call.getOperands().get(0); diff --git a/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/utils/FileUtils.java b/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/utils/FileUtils.java index 34c4ddbd4a5c..ffa5e8992d22 100644 --- a/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/utils/FileUtils.java +++ b/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/common/utils/FileUtils.java @@ -40,7 +40,7 @@ public static String readJsonFromResource(String file) { } public static FileFormatType getFormatType(String file) throws IOException { - // can not differentiate between properties and YAML format files based on their content, + // cannot differentiate between properties and YAML format files based on their content, // so here the determination is made based on the file extension. 
if (file.endsWith(".properties")) return FileFormatType.PROPERTIES; try (InputStream inputStream = new FileInputStream(file)) { diff --git a/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/gremlin/integration/graph/RemoteTestGraph.java b/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/gremlin/integration/graph/RemoteTestGraph.java index 6556b8cff818..8bcf3191a451 100644 --- a/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/gremlin/integration/graph/RemoteTestGraph.java +++ b/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/gremlin/integration/graph/RemoteTestGraph.java @@ -312,7 +312,7 @@ reason = "unsupported") // @Graph.OptOut( // test = "org.apache.tinkerpop.gremlin.process.traversal.step.map.CountTest", -// method = "g_V_whereXinXkknowsX_outXcreatedX_count_is_0XX_name", +// method = "g_V_whereXinXknowsX_outXcreatedX_count_is_0XX_name", // reason = "unsupported") @Graph.OptOut( test = "org.apache.tinkerpop.gremlin.process.traversal.step.map.GraphTest", diff --git a/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/gremlin/transform/TraversalParentTransformFactory.java b/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/gremlin/transform/TraversalParentTransformFactory.java index 8597033ceae1..958ee018c0c5 100644 --- a/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/gremlin/transform/TraversalParentTransformFactory.java +++ b/interactive_engine/compiler/src/main/java/com/alibaba/graphscope/gremlin/transform/TraversalParentTransformFactory.java @@ -216,7 +216,7 @@ private long getSeed(SampleGlobalStep step) { } }, // order().by("name"), order().by(values("name")) -> [OrderOp("@.name")] - // order().by(valueMap("name")) -> can not convert to FfiVariable with valueMap + // order().by(valueMap("name")) -> cannot convert to FfiVariable with valueMap // order().by(select("a").by("name")), order().by(select("a").by(values("name"))) -> // OrderOp("@a.name") 
// order().by(out().count) -> [ApplyOp(out().count()).as("order_1_apply"), diff --git a/interactive_engine/executor/common/dyn_type/tests/datetime_formats.rs b/interactive_engine/executor/common/dyn_type/tests/datetime_formats.rs index 523a217792cb..099b4aeb4468 100644 --- a/interactive_engine/executor/common/dyn_type/tests/datetime_formats.rs +++ b/interactive_engine/executor/common/dyn_type/tests/datetime_formats.rs @@ -141,7 +141,7 @@ mod tests { #[test] fn test_dateformat_cmp() { - // Date comparason + // Date comparison // 2020-10-10 let date = NaiveDate::from_ymd_opt(2020, 10, 10).unwrap(); let date_format = DateTimeFormats::Date(date); @@ -150,7 +150,7 @@ mod tests { let date_format2 = DateTimeFormats::Date(date2); assert!(date_format < date_format2); - // Time comparason + // Time comparison // 10:10:10 let time = NaiveTime::from_hms_opt(10, 10, 10).unwrap(); let time_format = DateTimeFormats::Time(time); @@ -159,7 +159,7 @@ mod tests { let time_format2 = DateTimeFormats::Time(time2); assert!(time_format < time_format2); - // DateTime comparason + // DateTime comparison // 2020-10-10 10:10:10.100 let date_time = NaiveDateTime::parse_from_str("2020-10-10 10:10:10.100", "%Y-%m-%d %H:%M:%S%.f").unwrap(); @@ -170,7 +170,7 @@ mod tests { let date_time_format2 = DateTimeFormats::DateTime(date_time2); assert!(date_time_format < date_time_format2); - // DateTimeWithTz comparason + // DateTimeWithTz comparison // 2020-10-09T23:10:10.100-11:00 let date_time_with_tz = DateTime::parse_from_rfc3339("2020-10-09T23:10:10.100-11:00").unwrap(); let date_time_with_tz_format = DateTimeFormats::DateTimeWithTz(date_time_with_tz); diff --git a/interactive_engine/executor/cov.sh b/interactive_engine/executor/cov.sh index d86760812ff5..5662cd60787d 100755 --- a/interactive_engine/executor/cov.sh +++ b/interactive_engine/executor/cov.sh @@ -37,7 +37,7 @@ run_module() { --logfile ${TMP_LOG} # It's treated as error only when module's unit test exits abnormally, # otherwise it runs 
successfully no matter tests passed or failed. - # For the 126 maigc, see: https://www.gnu.org/software/bash/manual/bash.html#Exit-Status + # For the 126 magic, see: https://www.gnu.org/software/bash/manual/bash.html#Exit-Status if [ "$?" -ge 126 ]; then echo "Module's unit test exits abnormally, maybe killed with some signal." exit 1 diff --git a/interactive_engine/executor/engine/pegasus/common/src/codec/mod.rs b/interactive_engine/executor/engine/pegasus/common/src/codec/mod.rs index 666aea7df614..e4fac682904c 100644 --- a/interactive_engine/executor/engine/pegasus/common/src/codec/mod.rs +++ b/interactive_engine/executor/engine/pegasus/common/src/codec/mod.rs @@ -83,7 +83,7 @@ pub trait Encode { fn write_to(&self, writer: &mut W) -> io::Result<()>; } -/// The deserialize interface used for decoding tryped structures from binary stream; +/// The deserialize interface used for decoding typed structures from binary stream; /// /// # Examples /// ``` diff --git a/interactive_engine/executor/engine/pegasus/graph/src/topo.rs b/interactive_engine/executor/engine/pegasus/graph/src/topo.rs index 0ac2e8935302..979b52c12001 100644 --- a/interactive_engine/executor/engine/pegasus/graph/src/topo.rs +++ b/interactive_engine/executor/engine/pegasus/graph/src/topo.rs @@ -40,7 +40,7 @@ impl> IdTopo { } } - // depth firsh search k-hop neighbors; + // depth first search k-hop neighbors; pub fn get_k_hop_neighbors(&self, id: u64, k: u8) -> Box> { if k == 0 { return Box::new(std::iter::empty()); diff --git a/interactive_engine/executor/engine/pegasus/pegasus/src/api/concise/keyed/join.rs b/interactive_engine/executor/engine/pegasus/pegasus/src/api/concise/keyed/join.rs index 97181a70e760..411dfca9d753 100644 --- a/interactive_engine/executor/engine/pegasus/pegasus/src/api/concise/keyed/join.rs +++ b/interactive_engine/executor/engine/pegasus/pegasus/src/api/concise/keyed/join.rs @@ -27,7 +27,7 @@ use crate::Data; /// /// We now implement 6 types of joins, namely [`inner_join`], 
[`left_outer_join`], /// [`right_outer_join`], [`full_outer_join`], [`semi_join`] and [`anti_join`]. -/// While [`semi_join`] and [`anti_join`] have the variances of left and right, but we only consier +/// While [`semi_join`] and [`anti_join`] have the variances of left and right, but we only consider /// the left case, knowing that the right case can be easily achieved by swapping the two streams. /// /// These joins are for now executed based on the equivalence of the keys of the left and right diff --git a/interactive_engine/executor/engine/pegasus/pegasus/src/api/concise/keyed/mod.rs b/interactive_engine/executor/engine/pegasus/pegasus/src/api/concise/keyed/mod.rs index d3663771c80a..534c1c0a0e80 100644 --- a/interactive_engine/executor/engine/pegasus/pegasus/src/api/concise/keyed/mod.rs +++ b/interactive_engine/executor/engine/pegasus/pegasus/src/api/concise/keyed/mod.rs @@ -97,7 +97,7 @@ impl Debug for Pair { /// `KeyBy` is used to transform any input data into a key-value pair. pub trait KeyBy { /// Given a user-defined function `selection`, this function is actually a map function that - /// transform each input data into a [`Pair`]. Addtionally, the output data must be repartitioned + /// transform each input data into a [`Pair`]. Additionally, the output data must be repartitioned /// via the key part of [`Pair`], such that all data with the same key will be guaranteed to /// arrive at the same worker. 
/// diff --git a/interactive_engine/executor/engine/pegasus/pegasus/src/communication/output/builder.rs b/interactive_engine/executor/engine/pegasus/pegasus/src/communication/output/builder.rs index a344b516c988..1e0d29f5ad1f 100644 --- a/interactive_engine/executor/engine/pegasus/pegasus/src/communication/output/builder.rs +++ b/interactive_engine/executor/engine/pegasus/pegasus/src/communication/output/builder.rs @@ -30,7 +30,7 @@ use crate::Data; #[derive(Copy, Clone, Debug)] pub struct OutputMeta { pub port: Port, - /// This is the the scope level of operator with this output port belongs to. + /// This is the scope level of operator with this output port belongs to. pub scope_level: u32, pub batch_size: usize, pub batch_capacity: u32, diff --git a/interactive_engine/executor/engine/pegasus/pegasus/src/communication/output/tee.rs b/interactive_engine/executor/engine/pegasus/pegasus/src/communication/output/tee.rs index 677f21755b96..88edcc3f6cf8 100644 --- a/interactive_engine/executor/engine/pegasus/pegasus/src/communication/output/tee.rs +++ b/interactive_engine/executor/engine/pegasus/pegasus/src/communication/output/tee.rs @@ -223,7 +223,7 @@ impl Push> for PerChannelPush { batch.set_tag(tag); self.push.push(batch) } else { - //is not end, and is emtpy, ignore; + //is not end, and is empty, ignore; Ok(()) } } else if batch.tag.len() < self.delta.origin_scope_level { diff --git a/interactive_engine/executor/engine/pegasus/pegasus/src/operator/mod.rs b/interactive_engine/executor/engine/pegasus/pegasus/src/operator/mod.rs index 1366b0dbd9c7..91c37f96f1b1 100644 --- a/interactive_engine/executor/engine/pegasus/pegasus/src/operator/mod.rs +++ b/interactive_engine/executor/engine/pegasus/pegasus/src/operator/mod.rs @@ -350,7 +350,7 @@ impl Operator { result } - // cancel output data of the scope on output port: `port`, if all output ports have canceled outputing + // cancel output data of the scope on output port: `port`, if all output ports have canceled 
outputting // this scope, the operator will cancel consuming the data of this scope, and try to notify its upstream // don't producing data of this scope to it; pub fn cancel(&mut self, port: usize, tag: Tag) -> Result<(), JobExecError> { diff --git a/interactive_engine/executor/engine/pegasus/server/config/server_config.toml b/interactive_engine/executor/engine/pegasus/server/config/server_config.toml index e53a09705377..560ac28815c5 100644 --- a/interactive_engine/executor/engine/pegasus/server/config/server_config.toml +++ b/interactive_engine/executor/engine/pegasus/server/config/server_config.toml @@ -42,7 +42,7 @@ servers_size = 2 # Set addresses of your servers; # If the cluster is standalone, the size of addresses should be equal to [server_size] set above, and the addresses -# should be in order, the fisrt address would be server 0. +# should be in order, the first address would be server 0. [[network.servers]] ip = '127.0.0.1' port = 8080 diff --git a/interactive_engine/executor/engine/pegasus/server/config/tests/standalone2/server_config.toml b/interactive_engine/executor/engine/pegasus/server/config/tests/standalone2/server_config.toml index 2f4e6af50ee5..d1e0019186cb 100644 --- a/interactive_engine/executor/engine/pegasus/server/config/tests/standalone2/server_config.toml +++ b/interactive_engine/executor/engine/pegasus/server/config/tests/standalone2/server_config.toml @@ -42,7 +42,7 @@ heartbeat_sec = 5 # Set addresses of your servers; # If the cluster is standalone, the size of addresses should be equal to [server_size] set above, and the addresses -# should be in order, the fisrt address would be server 0. +# should be in order, the first address would be server 0. 
[[network.servers]] hostname = '192.168.1.1' port = 8080 diff --git a/interactive_engine/executor/engine/pegasus/server/examples/echo_server_0/server_config.toml b/interactive_engine/executor/engine/pegasus/server/examples/echo_server_0/server_config.toml index 3ececf2deb40..f17960976d9d 100644 --- a/interactive_engine/executor/engine/pegasus/server/examples/echo_server_0/server_config.toml +++ b/interactive_engine/executor/engine/pegasus/server/examples/echo_server_0/server_config.toml @@ -42,7 +42,7 @@ servers_size = 2 # Set addresses of your servers; # If the cluster is standalone, the size of addresses should be equal to [server_size] set above, and the addresses -# should be in order, the fisrt address would be server 0. +# should be in order, the first address would be server 0. [[network.servers]] ip = '127.0.0.1' diff --git a/interactive_engine/executor/engine/pegasus/server/examples/echo_server_1/server_config.toml b/interactive_engine/executor/engine/pegasus/server/examples/echo_server_1/server_config.toml index 2bc0ee0151ce..8f9469130ae4 100644 --- a/interactive_engine/executor/engine/pegasus/server/examples/echo_server_1/server_config.toml +++ b/interactive_engine/executor/engine/pegasus/server/examples/echo_server_1/server_config.toml @@ -42,7 +42,7 @@ servers_size = 2 # Set addresses of your servers; # If the cluster is standalone, the size of addresses should be equal to [server_size] set above, and the addresses -# should be in order, the fisrt address would be server 0. +# should be in order, the first address would be server 0. 
[[network.servers]] ip = '127.0.0.1' diff --git a/interactive_engine/executor/ir/common/src/expr_parse/error.rs b/interactive_engine/executor/ir/common/src/expr_parse/error.rs index 2ee9ebe15627..84c7fb7ebecb 100644 --- a/interactive_engine/executor/ir/common/src/expr_parse/error.rs +++ b/interactive_engine/executor/ir/common/src/expr_parse/error.rs @@ -26,7 +26,7 @@ pub type ExprResult = Result; pub enum ExprError { /// The left brace may not be closed by a right brace UnmatchedLRBraces, - /// The left bracket may not be closed by a right braket + /// The left bracket may not be closed by a right bracket UnmatchedLRBrackets, /// An escape sequence within a string literal is illegal. IllegalEscapeSequence(String), diff --git a/interactive_engine/executor/ir/common/src/expr_parse/token.rs b/interactive_engine/executor/ir/common/src/expr_parse/token.rs index 92656f3d571f..3726ae394536 100644 --- a/interactive_engine/executor/ir/common/src/expr_parse/token.rs +++ b/interactive_engine/executor/ir/common/src/expr_parse/token.rs @@ -404,7 +404,7 @@ fn partial_tokens_to_tokens(mut tokens: &[PartialToken]) -> ExprResult { // Must check whether previous is what diff --git a/interactive_engine/executor/ir/common/src/utils.rs b/interactive_engine/executor/ir/common/src/utils.rs index 974fdeffa186..49076c65b09a 100644 --- a/interactive_engine/executor/ir/common/src/utils.rs +++ b/interactive_engine/executor/ir/common/src/utils.rs @@ -688,7 +688,7 @@ impl From for common_pb::Value { // convert to days since from 1970-01-01 item: (date .and_hms_opt(0, 0, 0) - .unwrap() // can savely unwrap since it is valid hour/min/sec + .unwrap() // can safely unwrap since it is valid hour/min/sec .timestamp() / 86400) as i32, }), diff --git a/interactive_engine/executor/ir/core/src/glogue/extend_step.rs b/interactive_engine/executor/ir/core/src/glogue/extend_step.rs index 90a876e991ec..4bf6ba72c415 100644 --- a/interactive_engine/executor/ir/core/src/glogue/extend_step.rs +++ 
b/interactive_engine/executor/ir/core/src/glogue/extend_step.rs @@ -147,7 +147,7 @@ impl ExactExtendStep { vec![extend_edge.generate_path_expands(path_opr)?] } }; - // every exapand should followed by an getV operator to close + // every expand should be followed by a getV operator to close let get_v = self.generate_get_v_operator(origin_pattern, extend_edge.get_direction(), is_pure_path)?; expand_oprs.push(get_v.clone()); diff --git a/interactive_engine/executor/ir/core/src/glogue/pattern.rs b/interactive_engine/executor/ir/core/src/glogue/pattern.rs index 3197ce86fa36..ea11e0559f83 100644 --- a/interactive_engine/executor/ir/core/src/glogue/pattern.rs +++ b/interactive_engine/executor/ir/core/src/glogue/pattern.rs @@ -246,7 +246,7 @@ pub struct Pattern { max_tag_id: TagId, } -/// Initialze a Pattern from just a single Pattern Vertex +/// Initialize a Pattern from just a single Pattern Vertex impl From for Pattern { fn from(vertex: PatternVertex) -> Pattern { let vid = vertex.id; @@ -260,7 +260,7 @@ impl From for Pattern { } } -/// Initialize a Pattern from a vertor of Pattern Edges +/// Initialize a Pattern from a vector of Pattern Edges impl From> for Pattern { fn from(edges: Vec) -> Pattern { let mut new_pattern = Pattern::default(); @@ -351,10 +351,10 @@ impl Pattern { let end_tag_v_id = end_tag.map(|tag| tag as PatternId); // check the end tag label is already determined or not let end_tag_label = end_tag_v_id.and_then(|v_id| vertex_labels_map.get(&v_id).cloned()); - // record previous pattern edge's destinated vertex's id + // record previous pattern edge's destination vertex's id // init as start vertex's id let mut pre_dst_vertex_id: PatternId = start_tag_v_id; - // record previous pattern edge's destinated vertex's label + // record previous pattern edge's destination vertex's label // init as start vertex's label let mut pre_dst_vertex_label = start_tag_label; // find the last edge expand's index if exists; diff --git 
a/interactive_engine/executor/ir/core/src/plan/ffi.rs b/interactive_engine/executor/ir/core/src/plan/ffi.rs index 8d3d10bd36eb..5ee412489262 100644 --- a/interactive_engine/executor/ir/core/src/plan/ffi.rs +++ b/interactive_engine/executor/ir/core/src/plan/ffi.rs @@ -18,7 +18,7 @@ //! //! We instruct how to use these apis as follows. //! -//! First of all, call `cbindgen` to generate the header of apis for C-binded caller, as: +//! First of all, call `cbindgen` to generate the header of apis for C-bound caller, as: //! `cbindgen --crate ir_core --output /path/to/c-caller/ir_core.h` //! //! Secondly, build the dynamic ir_core library, as: `cargo build --release`, diff --git a/interactive_engine/executor/ir/core/src/plan/logical.rs b/interactive_engine/executor/ir/core/src/plan/logical.rs index 64fbfdecc5f9..83835af9182c 100644 --- a/interactive_engine/executor/ir/core/src/plan/logical.rs +++ b/interactive_engine/executor/ir/core/src/plan/logical.rs @@ -251,7 +251,7 @@ impl LogicalPlan { let node_parent_children_len = node_parent.borrow().children.len(); node_flow += node_parent_flow / (node_parent_children_len as u64); } else { - // If one of current node's parent's flow is still not avaliable, it sugguests that + // If one of current node's parent's flow is still not available, it suggests that // it is too early to get current node's flow // Therefore, we delay the current node's flow computation by adding it to the queue again // and jump to the next iteration @@ -484,7 +484,7 @@ impl LogicalPlan { self.meta.set_curr_node(new_curr_node); // Configure `NodeMeta` for current node let _ = self.meta.curr_node_meta_mut(); - // By default, refer to the nodes that the the parent nodes refer to + // By default, refer to the nodes that the parent nodes refer to // Certain operators will modify the referred nodes during preprocessing, including // Scan, EdgeExpand, PathExpand, GetV, Apply and Project let ref_parent_nodes = self.meta.get_referred_nodes(&parent_ids); diff 
--git a/interactive_engine/executor/ir/core/src/plan/patmat.rs b/interactive_engine/executor/ir/core/src/plan/patmat.rs index b138da82911f..c3e16392dfc2 100644 --- a/interactive_engine/executor/ir/core/src/plan/patmat.rs +++ b/interactive_engine/executor/ir/core/src/plan/patmat.rs @@ -62,7 +62,7 @@ pub trait BasicSentence: AsBaseSentence { /// Get the end tag of the `BasicSentence`, which is optional fn get_end_tag(&self) -> Option<&NameOrId>; /// Get the join kind, for identifying if the basic sentence carries the - /// anti/semi join semantics, which can not be composited + /// anti/semi join semantics, which cannot be composited fn get_join_kind(&self) -> pb::join::JoinKind; /// Get the reverse sentence, which not only reverse the start and end tag (must present), /// and the direction of all edge/path expansions if possible. @@ -86,7 +86,7 @@ pub struct BaseSentence { /// Use `pb::logical_plan::Operator` rather than `pb::Pattern::binder`, /// to facilitate building the logical plan that may translate a tag into an `As` operator. operators: Vec, - /// Is this a sentence with Anti(No)-semanatics + /// Is this a sentence with Anti(No)-semantics join_kind: pb::join::JoinKind, /// What kind of entities this sentence binds to end_as: BindingOpt, @@ -625,7 +625,7 @@ pub struct CompoSentence { tags: BTreeSet, } -/// Preprocess a plan such that it does not contain two root nodes. More speciically, +/// Preprocess a plan such that it does not contain two root nodes. 
More specifically, /// we will add a common `As(None)` operator as the parent of the two original roots /// in the plan, which becomes the new and only root node of the plan fn preprocess_plan(plan: &mut pb::LogicalPlan) -> IrResult<()> { diff --git a/interactive_engine/executor/ir/graph_proxy/src/adapters/csr_store/read_graph.rs b/interactive_engine/executor/ir/graph_proxy/src/adapters/csr_store/read_graph.rs index a63ecfdfe8cf..eff1b2de7b49 100644 --- a/interactive_engine/executor/ir/graph_proxy/src/adapters/csr_store/read_graph.rs +++ b/interactive_engine/executor/ir/graph_proxy/src/adapters/csr_store/read_graph.rs @@ -175,7 +175,7 @@ impl ReadGraph for CSRStore { fn count_vertex(&self, params: &QueryParams) -> GraphProxyResult { if params.filter.is_some() { - // the filter can not be pushed down to store, + // the filter cannot be pushed down to store, // so we need to scan all vertices with filter and then count Ok(self.scan_vertex(params)?.count() as u64) } else { diff --git a/interactive_engine/executor/ir/graph_proxy/src/adapters/exp_store/read_graph.rs b/interactive_engine/executor/ir/graph_proxy/src/adapters/exp_store/read_graph.rs index 671129883d8c..933b0d42a250 100644 --- a/interactive_engine/executor/ir/graph_proxy/src/adapters/exp_store/read_graph.rs +++ b/interactive_engine/executor/ir/graph_proxy/src/adapters/exp_store/read_graph.rs @@ -368,7 +368,7 @@ impl ReadGraph for ExpStore { fn count_vertex(&self, params: &QueryParams) -> GraphProxyResult { if params.filter.is_some() { - // the filter can not be pushed down to exp_store, + // the filter cannot be pushed down to exp_store, // so we need to scan all vertices with filter and then count Ok(self.scan_vertex(params)?.count() as u64) } else { diff --git a/interactive_engine/executor/ir/graph_proxy/src/adapters/gs_store/read_graph.rs b/interactive_engine/executor/ir/graph_proxy/src/adapters/gs_store/read_graph.rs index c332ff6d5788..77dc1a349e0a 100644 --- 
a/interactive_engine/executor/ir/graph_proxy/src/adapters/gs_store/read_graph.rs +++ b/interactive_engine/executor/ir/graph_proxy/src/adapters/gs_store/read_graph.rs @@ -106,7 +106,7 @@ where let column_filter_pushdown = self.column_filter_pushdown; // props that will be returned by storage layer let prop_ids = if column_filter_pushdown { - // props that will be used in futher compute + // props that will be used in further computations let cache_prop_ids = encode_storage_prop_keys(params.columns.as_ref())?; if row_filter_exists_but_not_pushdown { // need to call filter_limit!, so get columns in row_filter and params.columns @@ -241,7 +241,7 @@ where let column_filter_pushdown = self.column_filter_pushdown; // also need props in filter, because `filter_limit!` let prop_ids = if column_filter_pushdown { - // props that will be used in futher compute + // props that will be used in further computations let cache_prop_ids = encode_storage_prop_keys(params.columns.as_ref())?; extract_needed_columns(params.filter.as_ref(), cache_prop_ids.as_ref())? } else { @@ -452,7 +452,7 @@ where fn count_vertex(&self, params: &QueryParams) -> GraphProxyResult { if params.filter.is_some() { - // the filter can not be pushed down to store, + // the filter cannot be pushed down to store, // so we need to scan all vertices with filter and then count Ok(self.scan_vertex(params)?.count() as u64) } else { diff --git a/interactive_engine/executor/ir/integrated/config/server_config.toml b/interactive_engine/executor/ir/integrated/config/server_config.toml index 65c30cc3dc9d..4120e11190c7 100644 --- a/interactive_engine/executor/ir/integrated/config/server_config.toml +++ b/interactive_engine/executor/ir/integrated/config/server_config.toml @@ -42,7 +42,7 @@ servers_size = 1 # Set addresses of your servers; # If the cluster is standalone, the size of addresses should be equal to [server_size] set above, and the addresses -# should be in order, the fisrt address would be server 0. 
+# should be in order, the first address would be server 0. [[network.servers]] hostname = '127.0.0.1' port = 11234 diff --git a/interactive_engine/executor/ir/integrated/tests/catalog_test.rs b/interactive_engine/executor/ir/integrated/tests/catalog_test.rs index 21a83a25250e..2a276bd138cf 100644 --- a/interactive_engine/executor/ir/integrated/tests/catalog_test.rs +++ b/interactive_engine/executor/ir/integrated/tests/catalog_test.rs @@ -582,7 +582,7 @@ mod test { }; let expand_opr_b_c = pb::EdgeExpand { v_tag: None, - direction: 0, // outhaod + direction: 0, // out params: Some(query_params(vec![1.into()], vec![], None)), expand_opt: pb::edge_expand::ExpandOpt::Edge as i32, alias: None, diff --git a/interactive_engine/executor/ir/proto/physical.proto b/interactive_engine/executor/ir/proto/physical.proto index 0e1689c613b1..a67f14536337 100644 --- a/interactive_engine/executor/ir/proto/physical.proto +++ b/interactive_engine/executor/ir/proto/physical.proto @@ -112,9 +112,9 @@ message Join { // aka. Cartesian product TIMES = 6; } - // The key to perferm Join (on results output by left_plan) + // The key to perform Join (on results output by left_plan) repeated common.Variable left_keys = 1; - // The key to perferm Join (on results output by right_plan) + // The key to perform Join (on results output by right_plan) repeated common.Variable right_keys = 2; JoinKind join_kind = 3; PhysicalPlan left_plan = 4; diff --git a/interactive_engine/executor/ir/runtime/src/assembly.rs b/interactive_engine/executor/ir/runtime/src/assembly.rs index e66721d767e8..0500ce7947e4 100644 --- a/interactive_engine/executor/ir/runtime/src/assembly.rs +++ b/interactive_engine/executor/ir/runtime/src/assembly.rs @@ -258,7 +258,7 @@ impl IRJobAssembly { .map(move |cnt| fold_map.exec(cnt))? 
.into_stream()?; } else { - // TODO: optimize this by fold_partiton + fold + // TODO: optimize this by fold_partition + fold let fold_accum = fold.gen_fold_accum()?; stream = stream .fold(fold_accum, || { @@ -680,7 +680,7 @@ impl IRJobAssembly { // do nothing, as it is a dummy node } OpKind::Sink(_) => { - // this would be processed in assemble, and can not be reached when install. + // this would be processed in assemble, and cannot be reached when install. Err(FnGenError::unsupported_error("unreachable sink in install"))? } } diff --git a/interactive_engine/executor/ir/runtime/src/process/operator/group/group.rs b/interactive_engine/executor/ir/runtime/src/process/operator/group/group.rs index 02298ca31220..7c0db4f13d06 100644 --- a/interactive_engine/executor/ir/runtime/src/process/operator/group/group.rs +++ b/interactive_engine/executor/ir/runtime/src/process/operator/group/group.rs @@ -51,7 +51,7 @@ impl GroupGen for pb::GroupBy { #[derive(Debug)] struct GroupMap { - /// aliases for group keys, if some key is not not required to be preserved, give None alias + /// aliases for group keys, if some key is not required to be preserved, give None alias key_aliases: Vec, } diff --git a/interactive_engine/executor/ir/runtime/src/process/operator/sink/sink.rs b/interactive_engine/executor/ir/runtime/src/process/operator/sink/sink.rs index 5c570c48f172..f0ae4ff70b30 100644 --- a/interactive_engine/executor/ir/runtime/src/process/operator/sink/sink.rs +++ b/interactive_engine/executor/ir/runtime/src/process/operator/sink/sink.rs @@ -224,7 +224,7 @@ impl RecordSinkEncoder { return NameOrId::Str(meta_name.clone()); } } - // if we can not find mapped meta_name, we return meta_id directly. + // if we cannot find mapped meta_name, we return meta_id directly. 
NameOrId::Id(meta_id) } diff --git a/interactive_engine/executor/store/exp_store/src/graph_db/graph_db_impl.rs b/interactive_engine/executor/store/exp_store/src/graph_db/graph_db_impl.rs index 2d5ca6187c60..61cacf86b05f 100644 --- a/interactive_engine/executor/store/exp_store/src/graph_db/graph_db_impl.rs +++ b/interactive_engine/executor/store/exp_store/src/graph_db/graph_db_impl.rs @@ -140,7 +140,7 @@ pub struct LargeGraphDB< pub(crate) topology: T, /// The schema of the vertex/edge property table pub(crate) graph_schema: Arc, - /// Table from internal vertexs' indices to their properties + /// Table from internal vertices' indices to their properties pub(crate) vertex_prop_table: N, /// Table from internal edges' indices to their properties pub(crate) edge_prop_table: E, @@ -605,7 +605,7 @@ pub struct MutableGraphDB< pub(crate) partition: usize, /// The graph structure, the label will be encoded as `LabelId` pub(crate) topology: T, - /// Table from internal vertexs' indices to their properties + /// Table from internal vertices' indices to their properties pub(crate) vertex_prop_table: N, /// Table from internal edges' indices to their properties pub(crate) edge_prop_table: E, @@ -923,7 +923,7 @@ mod test { .unwrap() .is_none()); - // The vertex PIDS[1] does no exist, can not update + // The vertex PIDS[1] does not exist, cannot update assert!(graphdb .add_or_update_vertex_properties(PIDS[1], prop.clone()) .is_err()); diff --git a/interactive_engine/executor/store/exp_store/src/graph_db/mod.rs b/interactive_engine/executor/store/exp_store/src/graph_db/mod.rs index 32837c936678..2c29b15cdfbe 100644 --- a/interactive_engine/executor/store/exp_store/src/graph_db/mod.rs +++ b/interactive_engine/executor/store/exp_store/src/graph_db/mod.rs @@ -185,8 +185,8 @@ impl<'a, G: IndexType, I: IndexType> LocalEdge<'a, G, I> { self } - /// An edge is uniquely indiced by its start/end vertex's global id, as well - /// as its internal id indiced from this start/end-vertex. 
+ /// An edge is uniquely indexed by its start/end vertex's global id, as well + /// as its internal id indexed from this start/end-vertex. /// Whether this is a start/end vertex, is determined by `Self::from_start` pub fn get_edge_id(&self) -> EdgeId { if self.from_start { @@ -361,12 +361,12 @@ pub trait GlobalStoreUpdate { &mut self, global_src_id: G, global_dst_id: G, label_id: LabelId, properties: Row, ) -> GDBResult>; - /// Add (none-corner) vertexs in batches, where each item contains the following elements: + /// Add (non-corner) vertices in batches, where each item contains the following elements: /// * vertex's global id with type `G` /// * vertex's label id /// * vertex's property if any (corner vertex does not have any property) /// - /// Return the number of vertexs that were successfully added. If there is any error while + /// Return the number of vertices that were successfully added. If there is any error while /// attempting to add certain vertex, `PropertyError` will be thrown. 
fn add_vertex_batches>( &mut self, iter: Iter, diff --git a/interactive_engine/executor/store/exp_store/src/ldbc.rs b/interactive_engine/executor/store/exp_store/src/ldbc.rs index 1e9bdc5c45b8..7e97e90b48c4 100644 --- a/interactive_engine/executor/store/exp_store/src/ldbc.rs +++ b/interactive_engine/executor/store/exp_store/src/ldbc.rs @@ -45,7 +45,7 @@ pub static LDBC_SUFFIX: &'static str = "_0_0.csv"; /// A ldbc raw file uses | to split data fields pub static SPLITTER: &'static str = "|"; -/// A hdfs partitioned data starts wtih "part-" +/// A hdfs partitioned data starts with "part-" pub static PARTITION_PREFIX: &'static str = "part-"; /// Given a worker of ID `worker`, identify the files it will processed diff --git a/interactive_engine/executor/store/exp_store/src/parser.rs b/interactive_engine/executor/store/exp_store/src/parser.rs index 245a2dd6dd18..2ac2606eaaa7 100644 --- a/interactive_engine/executor/store/exp_store/src/parser.rs +++ b/interactive_engine/executor/store/exp_store/src/parser.rs @@ -222,7 +222,7 @@ mod test { let time11 = "19600410"; let time2 = "2012-07-21T07:59:14.322+0000"; let time3 = "20120721075914"; - // java miniseconds + // java milliseconds let time4 = "1316563200000"; let time5 = "628646400000"; diff --git a/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/global_store_ffi.h b/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/global_store_ffi.h index ac1fd52868e4..b9db1fd0aeb3 100644 --- a/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/global_store_ffi.h +++ b/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/global_store_ffi.h @@ -230,7 +230,7 @@ void v6d_free_properties_iterator(PropertiesIterator iter); // ----------------- property api -------------------- // -// 获取属性值,这里需要在c++里判断类型是否正确,比如:对stirng属性调用get_property_as_int就应该报错,返回-1表示错误,返回0表示正确。 +// 获取属性值,这里需要在c++里判断类型是否正确,比如:对string属性调用get_property_as_int就应该报错,返回-1表示错误,返回0表示正确。 // 
如果类型是正确的,则把值填进out指针 int v6d_get_property_as_bool(struct Property* property, bool* out); int v6d_get_property_as_char(struct Property* property, char* out); diff --git a/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/graph_builder_ffi.cc b/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/graph_builder_ffi.cc index edee0601f64e..0cd6d735e244 100644 --- a/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/graph_builder_ffi.cc +++ b/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/graph_builder_ffi.cc @@ -244,7 +244,7 @@ void v6d_destroy(GraphBuilder builder) { LOG(INFO) << "destory: builder = " << builder; auto stream = static_cast *>(builder); - // delete the shared_ptr object on heap, it will then delete the holded + // delete the shared_ptr object on heap, it will then delete the held // object. delete stream; } diff --git a/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/graph_builder_ffi.h b/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/graph_builder_ffi.h index 0682dd8952b3..3fcd9c93c645 100644 --- a/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/graph_builder_ffi.h +++ b/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/graph_builder_ffi.h @@ -127,7 +127,7 @@ int v6d_add_edges(GraphBuilder builder, size_t edge_size, * 结束local GraphBuilder的build,点、边写完之后分别调用 */ int v6d_build(GraphBuilder builder); -// as an alias due for backwardscompatibility +// as an alias due for backwards compatibility int v6d_build_vertice(GraphBuilder builder); int v6d_build_vertices(GraphBuilder builder); int v6d_build_edges(GraphBuilder builder); diff --git a/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/property_graph_stream.h b/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/property_graph_stream.h index fd4ee4a29aa6..c97ee6c4fba1 100644 --- 
a/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/property_graph_stream.h +++ b/interactive_engine/executor/store/global_query/src/store_impl/v6d/native/property_graph_stream.h @@ -356,7 +356,7 @@ class PropertyGraphOutStream : public Registered { std::map> vertex_builders_; - // vertex label id to its primary key column (assuming only signle column key) ordinal mapping + // vertex label id to its primary key column (assuming only single column key) ordinal mapping // -1 means no primary key column static constexpr size_t kNoPrimaryKeyColumn = static_cast(-1); std::map vertex_primary_key_column_; diff --git a/interactive_engine/groot-module/src/main/java/com/alibaba/graphscope/groot/meta/MetaStore.java b/interactive_engine/groot-module/src/main/java/com/alibaba/graphscope/groot/meta/MetaStore.java index 916aee3448f2..5887d4fdf002 100644 --- a/interactive_engine/groot-module/src/main/java/com/alibaba/graphscope/groot/meta/MetaStore.java +++ b/interactive_engine/groot-module/src/main/java/com/alibaba/graphscope/groot/meta/MetaStore.java @@ -17,7 +17,7 @@ /** * Distributed, reliable storage used for SnapshotManager to persist Snapshot related meta. 
We can - * implement this interface with Zookeeper + * implement this interface with ZooKeeper */ public interface MetaStore { diff --git a/interactive_engine/pom.xml b/interactive_engine/pom.xml index f20ecc0a98a1..638aaff9f221 100644 --- a/interactive_engine/pom.xml +++ b/interactive_engine/pom.xml @@ -198,7 +198,7 @@ 4.9.1 3.16.3 - + 4.5.13 0.36.4-public diff --git a/interactive_engine/tests/src/main/java/com/alibaba/graphscope/function/test/RemoteTestGraphProvider.java b/interactive_engine/tests/src/main/java/com/alibaba/graphscope/function/test/RemoteTestGraphProvider.java index bdd1f0b37cc2..31d6373702f0 100644 --- a/interactive_engine/tests/src/main/java/com/alibaba/graphscope/function/test/RemoteTestGraphProvider.java +++ b/interactive_engine/tests/src/main/java/com/alibaba/graphscope/function/test/RemoteTestGraphProvider.java @@ -27,7 +27,7 @@ import java.util.Set; /** - * {@link AbstractGraphProvider} is privided by gremlin to adapt to its test framework + * {@link AbstractGraphProvider} is provided by gremlin to adapt to its test framework * {@link RemoteTestGraph} will be constructed by this provider */ public class RemoteTestGraphProvider extends AbstractGraphProvider { diff --git a/k8s/actions-runner-controller/manylinux/Dockerfile b/k8s/actions-runner-controller/manylinux/Dockerfile index bf973f5d86cc..5c3ce0558cc3 100644 --- a/k8s/actions-runner-controller/manylinux/Dockerfile +++ b/k8s/actions-runner-controller/manylinux/Dockerfile @@ -41,11 +41,11 @@ ENV HOME=/home/graphscope # - Run cp ../_package/actions-runner-linux-x64-2.280.3.tar.gz ../../actions-runner-controller/runner/ # - Beware that `2.280.3` might change across versions # -# See https://github.com/actions/runner/blob/main/.github/workflows/release.yml for more informatino on how you can use dev.sh +# See https://github.com/actions/runner/blob/main/.github/workflows/release.yml for more information on how you can use dev.sh # # If you're willing to uncomment the following line, you'd 
also need to comment-out the # && curl -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${ARCH}-${RUNNER_VERSION}.tar.gz \ -# line in the next `RUN` command in this Dockerfile, to avoid overwirting this runner.tar.gz with a remote one. +# line in the next `RUN` command in this Dockerfile, to avoid overwriting this runner.tar.gz with a remote one. # COPY actions-runner-linux-x64-2.280.3.tar.gz /runnertmp/runner.tar.gz diff --git a/k8s/actions-runner-controller/manylinux/entrypoint.sh b/k8s/actions-runner-controller/manylinux/entrypoint.sh index 156605b2fee9..c01114e50ee6 100755 --- a/k8s/actions-runner-controller/manylinux/entrypoint.sh +++ b/k8s/actions-runner-controller/manylinux/entrypoint.sh @@ -89,7 +89,7 @@ if [[ "${UNITTEST:-}" == '' ]]; then fi cd ${RUNNER_HOME} -# past that point, it's all relative pathes from /runner +# past that point, it's all relative paths from /runner config_args=() if [ "${RUNNER_FEATURE_FLAG_EPHEMERAL:-}" == "true" -a "${RUNNER_EPHEMERAL}" == "true" ]; then @@ -144,7 +144,7 @@ cat .runner # } # # Especially `agentId` is important, as other than listing all the runners in the repo, -# this is the only change we could get the exact runnner ID which can be useful for further +# this is the only change we could get the exact runner ID which can be useful for further # GitHub API call like the below. Note that 171 is the agentId seen above. # curl \ # -H "Accept: application/vnd.github.v3+json" \ diff --git a/proto/coordinator_service.proto b/proto/coordinator_service.proto index 038709bd66ff..aa5a8ea88422 100644 --- a/proto/coordinator_service.proto +++ b/proto/coordinator_service.proto @@ -34,7 +34,7 @@ service CoordinatorService { // Closes a session. 
rpc CloseSession(CloseSessionRequest) returns (CloseSessionResponse); - // Distribute the specified libary to servers + // Distribute the specified library to servers rpc AddLib(AddLibRequest) returns (AddLibResponse); rpc CreateAnalyticalInstance (CreateAnalyticalInstanceRequest) returns (CreateAnalyticalInstanceResponse); diff --git a/proto/error_codes.proto b/proto/error_codes.proto index 7b9ca43e8838..c2e582dce8a2 100644 --- a/proto/error_codes.proto +++ b/proto/error_codes.proto @@ -24,11 +24,11 @@ enum Code { // Timeout, used when an operation fail to return result in an specific time. TIMEOUT_ERROR = 1; - // Required resources can not be found. + // Required resources cannot be found. NOT_FOUND_ERROR = 2; // Connection error with client. - // e.g. multiple client connnect to coordinator at the same time or + // e.g. multiple clients connect to coordinator at the same time or // failed to launch engine locally. CONNECTION_ERROR = 3; diff --git a/proto/graph_def.proto b/proto/graph_def.proto index 02866143622e..1246b1c40441 100644 --- a/proto/graph_def.proto +++ b/proto/graph_def.proto @@ -60,7 +60,7 @@ message VineyardInfoPb { bool generate_eid = 6; // object id of the graph in vineyard int64 vineyard_id = 7; - // For client to reconstrut graph schema + // For client to reconstruct graph schema string property_schema_json = 8; // Global or local vertex map VertexMapTypePb vertex_map_type = 9; diff --git a/proto/types.proto b/proto/types.proto index 2155e7e63d02..3b71e50de076 100644 --- a/proto/types.proto +++ b/proto/types.proto @@ -105,7 +105,7 @@ enum OperationType { UNLOAD_CONTEXT = 23; // unload context ARCHIVE_GRAPH = 24; // archive graph SERIALIZE_GRAPH = 25; // serialize graph - DESERIALIZE_GRAPH = 26; // desrialize graph + DESERIALIZE_GRAPH = 26; // deserialize graph CONSOLIDATE_COLUMNS = 27; // consolidate property columns in the graph SUBGRAPH = 32; // subgraph in interactive query @@ -268,7 +268,7 @@ enum ParamKey { JVM_OPTS = 503; // opts str to
start a jvm } -// For simulating networkx modifing functionalities +// For simulating networkx modifying functionalities enum ModifyType { NX_ADD_NODES = 0; NX_ADD_EDGES = 1; diff --git a/python/graphscope/__init__.py b/python/graphscope/__init__.py index a3ed3ea76005..079751face65 100644 --- a/python/graphscope/__init__.py +++ b/python/graphscope/__init__.py @@ -20,7 +20,7 @@ import platform import sys -# Tensorflow with Python 3.7 and ARM platform requires lower version of protobuf +# TensorFlow with Python 3.7 and ARM platform requires lower version of protobuf if (sys.version_info.major == 3 and sys.version_info.minor == 7) or ( platform.system() == "Linux" and platform.processor() == "aarch64" ): diff --git a/python/graphscope/analytical/udf/utils.py b/python/graphscope/analytical/udf/utils.py index 327d22e6fd53..fc5941d3d1ad 100644 --- a/python/graphscope/analytical/udf/utils.py +++ b/python/graphscope/analytical/udf/utils.py @@ -285,7 +285,7 @@ def read_bytes(self, raw=False): raw: bool If True, return the raw bytes. Otherwise return the BytesIO object. """ - # close the file first before reading bytes, close repeatly is OK. + # close the file first before reading bytes, close repeatedly is OK. self.zip_file.close() if raw: return self.in_memory_buffer.getvalue() diff --git a/python/graphscope/client/session.py b/python/graphscope/client/session.py index 6c9c26dd6c0d..ecdfbdbe4058 100755 --- a/python/graphscope/client/session.py +++ b/python/graphscope/client/session.py @@ -463,7 +463,7 @@ def __init__( a GraphScope session. 
""" - # supress the grpc warnings, see also grpc/grpc#29103 + # suppress the grpc warnings, see also grpc/grpc#29103 os.environ["GRPC_ENABLE_FORK_SUPPORT"] = "false" self._accessable_params = ( "addr", @@ -1034,7 +1034,7 @@ def store_to_pvc(self, graphIDs, path: str, pvc_name: str): namespace = self._config.kubernetes_launcher.namespace self._ensure_vineyard_deployment_exists(vineyard_deployment_name, namespace) self._ensure_pvc_exists(pvc_name, namespace) - # The next function will create a kubernetes job for backuping + # The next function will create a kubernetes job for backing up # the specific graphIDs to the specific path of the specific pvc vineyard.deploy.vineyardctl.deploy.backup_job( backup_name="vineyard-backup-" + random_string(6), diff --git a/python/graphscope/framework/context.py b/python/graphscope/framework/context.py index a8b14d8e9c11..c41604e0d4bf 100644 --- a/python/graphscope/framework/context.py +++ b/python/graphscope/framework/context.py @@ -83,7 +83,7 @@ class BaseContextDAGNode(DAGNode): >>> c = graphscope.sssp(sg, 20) >>> print(c) # >>> r1 = c.to_numpy("r") - >>> print(r1) # + >>> print(r1) # >>> r2 = c.to_dataframe({"id": "v.id", "result": "r"}) >>> r3 = c.to_vineyard_tensor("r") >>> r4 = c.to_vineyard_dataframe({"id": "v.id", "result": "r"}) diff --git a/python/graphscope/framework/dag.py b/python/graphscope/framework/dag.py index 07f24d9151c3..f2acb529cd41 100644 --- a/python/graphscope/framework/dag.py +++ b/python/graphscope/framework/dag.py @@ -76,7 +76,7 @@ def extract_subdag_for(self, ops): # leaf op handle # there are two kinds of leaf op: # 1) unload graph / app - # 2) networkx releated op + # 2) networkx related op if len(ops) == 1 and ops[0].is_leaf_op(): out.op.extend([ops[0].as_op_def()]) return out diff --git a/python/graphscope/framework/errors.py b/python/graphscope/framework/errors.py index 6e0cba8b8a53..164b22721d7c 100644 --- a/python/graphscope/framework/errors.py +++ b/python/graphscope/framework/errors.py @@ -21,7 
+21,7 @@ from graphscope.proto import error_codes_pb2 from graphscope.proto import op_def_pb2 -# All kinds of Graphscope error. +# All kinds of GraphScope error. __all__ = [ "NotFoundError", "VineyardError", diff --git a/python/graphscope/framework/graph.py b/python/graphscope/framework/graph.py index 883c2bcf731e..59b49616042f 100644 --- a/python/graphscope/framework/graph.py +++ b/python/graphscope/framework/graph.py @@ -182,7 +182,7 @@ def _construct_op_from_vineyard_id(self, vineyard_id): config[types_pb2.VINEYARD_ID] = utils.i_to_attr(int(vineyard_id)) # FIXME(hetao) hardcode oid/vid type for codegen, when loading from vineyard # - # the metadata should be retrived from vineyard + # the metadata should be retrieved from vineyard config[types_pb2.OID_TYPE] = utils.s_to_attr("int64_t") config[types_pb2.VID_TYPE] = utils.s_to_attr("uint64_t") return dag_utils.create_graph( @@ -196,7 +196,7 @@ def _construct_op_from_vineyard_name(self, vineyard_name): config[types_pb2.VINEYARD_NAME] = utils.s_to_attr(str(vineyard_name)) # FIXME(hetao) hardcode oid/vid type for codegen, when loading from vineyard # - # the metadata should be retrived from vineyard + # the metadata should be retrieved from vineyard config[types_pb2.OID_TYPE] = utils.s_to_attr("int64_t") config[types_pb2.VID_TYPE] = utils.s_to_attr("uint64_t") return dag_utils.create_graph( diff --git a/python/graphscope/framework/graph_builder.py b/python/graphscope/framework/graph_builder.py index fc1a6e4cb07b..3ebf8a4ec2c3 100644 --- a/python/graphscope/framework/graph_builder.py +++ b/python/graphscope/framework/graph_builder.py @@ -170,7 +170,7 @@ def load_from( Defaults to False. """ - # Don't import the :code:`nx` in top-level statments to improve the + # Don't import the :code:`nx` in top-level statements to improve the # performance of :code:`import graphscope`. 
from graphscope import nx diff --git a/python/graphscope/gsctl/commands/__init__.py b/python/graphscope/gsctl/commands/__init__.py index 62aa7b85094e..dcda2954e109 100644 --- a/python/graphscope/gsctl/commands/__init__.py +++ b/python/graphscope/gsctl/commands/__init__.py @@ -29,7 +29,7 @@ def get_command_collection(context: Context): # default commands commands = click.CommandCollection(sources=[common, dev]) - # treat gsctl as an utility script, providing hepler functions or utilities + # treat gsctl as a utility script, providing helper functions or utilities # e.g. initialize and manage cluster, install the dependencies required to # build graphscope locally. if context is None: diff --git a/python/graphscope/nx/algorithms/builtin.py b/python/graphscope/nx/algorithms/builtin.py index af1f68c98572..2b1b27f64186 100644 --- a/python/graphscope/nx/algorithms/builtin.py +++ b/python/graphscope/nx/algorithms/builtin.py @@ -335,7 +335,7 @@ def _average_shortest_path_length(G, weight=None): if method is not None: return nxa.average_shortest_path_length(G, weight, method) n = len(G) - # For the specail case of the null graph. raise an exception, since + # For the special case of the null graph. raise an exception, since # there are no paths in the null graph.
if n == 0: msg = ( @@ -920,7 +920,7 @@ def all_simple_paths(G, source, target_nodes, cutoff=None): """ paths = get_all_simple_paths(G, source, target_nodes, cutoff) - # delte path tail padding + # delete path tail padding for path in paths: for i in range(len(path) - 1, -1, -1): if path[i] == -1: diff --git a/python/graphscope/nx/generators/lattice.py b/python/graphscope/nx/generators/lattice.py index 6ceb12861eaf..4332cb28bf3f 100644 --- a/python/graphscope/nx/generators/lattice.py +++ b/python/graphscope/nx/generators/lattice.py @@ -166,7 +166,7 @@ def _dict_product(d1, d2): return {k: (d1.get(k), d2.get(k)) for k in set(d1) | set(d2)} -# Generators for producting graph products +# Generators for producing graph products def _node_product(G, H): for u, v in product(G, H): yield ((u, v), _dict_product(G.nodes[u], H.nodes[v])) diff --git a/python/graphscope/nx/generators/random_graphs.py b/python/graphscope/nx/generators/random_graphs.py index 2badd88082cf..81857e8ec2c6 100644 --- a/python/graphscope/nx/generators/random_graphs.py +++ b/python/graphscope/nx/generators/random_graphs.py @@ -535,7 +535,7 @@ def extended_barabasi_albert_graph(n, m, p, q, seed=None): # The available nodes do have a neighbor at least. 
neighbor_nodes = list(G[node]) - # Choosing the other end that will get dettached + # Choosing the other end that will get detached src_node = seed.choice(neighbor_nodes) # Picking a target node that is not 'node' or diff --git a/python/graphscope/nx/tests/classes/test_graphviews.py b/python/graphscope/nx/tests/classes/test_graphviews.py index 455154ab093a..1329559ccfd9 100644 --- a/python/graphscope/nx/tests/classes/test_graphviews.py +++ b/python/graphscope/nx/tests/classes/test_graphviews.py @@ -155,7 +155,7 @@ def test_subgraph_toundirected(self): assert edges[1] in ((5, 6), (6, 5)) def test_reverse_subgraph_toundirected(self): - # a view can not project subgraph in graphscope.nx + # a view cannot project subgraph in graphscope.nx G = self.DG.reverse() SG = G.subgraph([4, 5, 6]) SSG = SG.to_undirected() diff --git a/python/graphscope/nx/tests/test_transformation.py b/python/graphscope/nx/tests/test_transformation.py index 0c924437d7d9..d0994bee90fa 100644 --- a/python/graphscope/nx/tests/test_transformation.py +++ b/python/graphscope/nx/tests/test_transformation.py @@ -220,19 +220,19 @@ def test_complete_nx_to_gs(self): gs_g = g(nx_g) self.assert_convert_success(gs_g, nx_g) - # node property aliged, edge not aliged + # node property aligned, edge not aligned nx_g2 = nx_g.copy() nx_g2.add_edge(0, 1, ep4="new propery") gs_g2 = g(nx_g2) self.assert_convert_success(gs_g2, nx_g2) - # edge property aliged, node not aliged + # edge property aligned, node not aligned nx_g3 = nx_g.copy() nx_g3.add_node(2, vp4="new propery") gs_g3 = g(nx_g3) self.assert_convert_success(gs_g3, nx_g3) - # both not aliged + # both not aligned nx_g4 = nx_g.copy() nx_g4.add_edge(0, 1, ep4="new propery") nx_g4.add_node(2, vp4="new propery") @@ -361,7 +361,7 @@ def test_report_methods_on_copy_on_write_strategy(self): # test HAS_NODE and HAS_EDGE assert 0 not in G assert 933 in G - assert ("person", 933) not in G # deault node must be non-tuple format + assert ("person", 933) not in G # default 
node must be non-tuple format assert ("random", 933) not in G assert G.has_edge(933, 4398046511628) assert G.has_edge(("comment", 618475290625), ("post", 618475290624)) diff --git a/python/graphscope/nx/utils/compat.py b/python/graphscope/nx/utils/compat.py index c6e838c23509..8016e99d7e28 100644 --- a/python/graphscope/nx/utils/compat.py +++ b/python/graphscope/nx/utils/compat.py @@ -137,7 +137,7 @@ def load_the_module(module_or_name): def apply_networkx_patches(module): - # there'a some name conflicts in networkx and we need to be careful + # there are some name conflicts in networkx and we need to be careful # e.g., # # networkx.algorithms.approximation.connectivity @@ -189,7 +189,7 @@ def replace_module_context( # noqa: C901 decorators = [decorators] # get the caller's module, and since this function might be called by import_as_grape_nx - # in this module, we go up and findout the real caller. + # in this module, we go up and find out the real caller. for loc in inspect.stack()[1:]: mod = inspect.getmodule(loc[0]) if mod.__name__ != __name__: @@ -421,7 +421,7 @@ def patch_class_wrapper(target_class): if k not in global_ctx: global_ctx[k] = v - # run replacing module context again for accurance + # run replacing module context again for accuracy global_ctx = replace_context(global_ctx, source_module, target_module) fn = ( copy_property(meth, global_ctx) diff --git a/python/graphscope/tests/unittest/test_graph.py b/python/graphscope/tests/unittest/test_graph.py index 92460a7172f2..227faec10196 100644 --- a/python/graphscope/tests/unittest/test_graph.py +++ b/python/graphscope/tests/unittest/test_graph.py @@ -654,11 +654,11 @@ def test_add_column(ldbc_graph, arrow_modern_graph): g3 = sub_graph_2.add_column(ret, selector={"pr": "r"}) assert g3.schema.get_vertex_properties("person")[8].id == 8 assert g3.schema.get_vertex_properties("person")[8].name == "pr" - # the ret can not add to sub_graph_3 + # the ret cannot add to sub_graph_3 with 
pytest.raises(AnalyticalEngineInternalError): g4 = sub_graph_3.add_column(ret, selector={"pr": "r"}) print(g4.schema) - # the ret can not add to sub_graph_4 + # the ret cannot add to sub_graph_4 with pytest.raises(AnalyticalEngineInternalError): g5 = sub_graph_4.add_column(ret, selector={"pr": "r"}) print(g4.schema) diff --git a/python/graphscope/tests/unittest/test_udf_app.py b/python/graphscope/tests/unittest/test_udf_app.py index c6cc2e5e5469..b2b6aafcf6d9 100644 --- a/python/graphscope/tests/unittest/test_udf_app.py +++ b/python/graphscope/tests/unittest/test_udf_app.py @@ -775,7 +775,7 @@ def Init(v, context): pass @staticmethod - def Compute(v, context): # misssing message + def Compute(v, context): # missing message pass with pytest.raises(AssertionError, match="The number of parameters does not match"): diff --git a/python/jupyter/graphscope/MANIFEST.in b/python/jupyter/graphscope/MANIFEST.in index 27b7e0029ab2..fc18d77ae50c 100644 --- a/python/jupyter/graphscope/MANIFEST.in +++ b/python/jupyter/graphscope/MANIFEST.in @@ -24,7 +24,7 @@ graft examples graft tests prune tests/build -# Javascript files +# JavaScript files graft graphscope-jupyter/nbextension graft src graft css diff --git a/scripts/install_deps.sh b/scripts/install_deps.sh index 33ed84dd1e66..6d0d0c59a2e3 100755 --- a/scripts/install_deps.sh +++ b/scripts/install_deps.sh @@ -46,7 +46,7 @@ succ() { } ########################## -# Output useage information. +# Output usage information. # Globals: # None # Arguments: @@ -119,7 +119,7 @@ get_os_version() { readonly OS_VERSION } -# Functions to install dependencies of k8s evironment. +# Functions to install dependencies of k8s environment. check_os_compatibility_k8s() { if [[ "${IS_IN_WSL}" == true && -z "${WSL_INTEROP}" ]]; then err "The platform is WSL1. GraphScope not support to run on WSL1, please use WSL2." @@ -214,7 +214,7 @@ start_docker() { } ########################## -# Launch kubenetes cluster with kind. 
+# Launch kubernetes cluster with kind. # Globals: # None # Arguments: diff --git a/scripts/launch_cluster.py b/scripts/launch_cluster.py index 0598cf9861ba..64035af9692a 100755 --- a/scripts/launch_cluster.py +++ b/scripts/launch_cluster.py @@ -276,7 +276,7 @@ def get_vpc_stack(self, vpc_name): # A waiter is something which polls AWS to find out if an operation # has completed. waiter = self._cf.get_waiter("stack_create_complete") - # Wait for stack creation to complet + # Wait for stack creation to complete waiter.wait(StackName=vpc_name) except Exception as e: # If waiter fails, that'll be the thing taking too long to deploy.