diff --git a/config.toml b/config.toml index 5a02c1472c..8be46eee29 100644 --- a/config.toml +++ b/config.toml @@ -15,7 +15,14 @@ defaultContentLanguageInSubdir = false # Useful when translating. enableMissingTranslationPlaceholders = true -disableKinds = ["taxonomy", "taxonomyTerm"] +#disableKinds = ["taxonomy", "taxonomyTerm"] +[taxonomies] +projects = "projects" + +[params.taxonomy] +taxonomyCloud = ["projects"] +taxonomyCloudTitle = ["taxo.projects.title"] +taxonomyPageHeader = ["projects"] # i18n prefix taxo.page.header # Highlighting config pygmentsCodeFences = true @@ -50,26 +57,26 @@ anchor = "smart" contentDir = "content/en" title = "CloudWeGo" description = "A leading practice for building enterprise cloud native middleware!" -languageName ="English" +languageName = "English" # Weight used for sorting. weight = 1 [languages.zh] contentDir = "content/zh" title = "CloudWeGo" description = "A leading practice for building enterprise cloud native middleware!" -languageName ="中文" +languageName = "中文" # Weight used for sorting. 
weight = 1 [markup] - [markup.goldmark] - [markup.goldmark.renderer] - unsafe = true - [markup.highlight] - # See a complete list of available styles at https://xyproto.github.io/splash/docs/all.html - style = "tango" - # Uncomment if you want your chosen highlight style used for code blocks without a specified language - # guessSyntax = "true" +[markup.goldmark] +[markup.goldmark.renderer] +unsafe = true +[markup.highlight] +# See a complete list of available styles at https://xyproto.github.io/splash/docs/all.html +style = "tango" +# Uncomment if you want your chosen highlight style used for code blocks without a specified language +# guessSyntax = "true" # Everything below this are Site Params @@ -115,7 +122,7 @@ github_org = "https://github.com/cloudwego" # Uncomment this if you have a newer GitHub repo with "main" as the default branch, # or specify a new value if you want to reference another branch in your GitHub links -github_branch= "main" +github_branch = "main" # Enable Algolia DocSearch algolia_docsearch = true @@ -177,28 +184,28 @@ name = "License" url = "https://github.com/cloudwego/cloudwego.github.io/blob/main/LICENSE" [[params.links.user]] - name = "Lark" - url = "https://applink.feishu.cn/client/chat/chatter/add_by_link?link_token=693v2544-2664-4421-b50f-7f1912p745r6" - icon = "fab fa-lark" - desc = "Chat with other CloudWeGo users by lark" +name = "Lark" +url = "https://applink.feishu.cn/client/chat/chatter/add_by_link?link_token=693v2544-2664-4421-b50f-7f1912p745r6" +icon = "fab fa-lark" +desc = "Chat with other CloudWeGo users by lark" [[params.links.user]] - name = "Twitter" - url = "https://twitter.com/CloudWeGo" - icon = "fab fa-twitter" - desc = "Follow us on Twitter to get the latest news!" +name = "Twitter" +url = "https://twitter.com/CloudWeGo" +icon = "fab fa-twitter" +desc = "Follow us on Twitter to get the latest news!" 
[[params.links.developer]] - name = "GitHub" - url = "https://github.com/cloudwego" - icon = "fab fa-github" - desc = "Development takes place here!" +name = "GitHub" +url = "https://github.com/cloudwego" +icon = "fab fa-github" +desc = "Development takes place here!" [[params.links.developer]] - name = "Slack" - url = "https://join.slack.com/t/cloudwego/shared_invite/zt-tmcbzewn-UjXMF3ZQsPhl7W3tEDZboA" - icon = "fab fa-slack" - desc = "Chat with other project developers" +name = "Slack" +url = "https://join.slack.com/t/cloudwego/shared_invite/zt-tmcbzewn-UjXMF3ZQsPhl7W3tEDZboA" +icon = "fab fa-slack" +desc = "Chat with other project developers" [outputFormats] [outputFormats.PRINT] diff --git a/content/en/blog/_index.md b/content/en/blog/_index.md index 5391ed83da..43bd7ecfd7 100644 --- a/content/en/blog/_index.md +++ b/content/en/blog/_index.md @@ -1,6 +1,7 @@ --- title: "Blog" linkTitle: "Blog" +projects: [] menu: main: weight: 20 diff --git a/content/en/blog/news/1st_RPCKitex/index.md b/content/en/blog/news/1st_RPCKitex/index.md index 77c34d4668..ae5502de7d 100644 --- a/content/en/blog/news/1st_RPCKitex/index.md +++ b/content/en/blog/news/1st_RPCKitex/index.md @@ -1,16 +1,20 @@ --- date: 2022-09-30 title: "Kitex: Unifying Open Source Practice for a High-Performance RPC Framework" +projects: ["Kitex"] linkTitle: "Kitex: Unifying Open Source Practice for a High-Performance RPC Framework" keywords: ["CloudWeGo", "RPC framework", "Kitex", "microservice framework", "ByteDance Open Source", "open source"] description: "This article provides an overview of CloudWeGo - Kitex, a high-performance RPC framework, including its origins, development history, and the progress made since its open-source release a year ago. It covers the evolution of functional features, contributions from the community to the ecosystem, and successful implementation practices by enterprises. 
It highlights the growth and improvements Kitex has undergone, showcasing its commitment to delivering a robust and efficient solution for RPC communication in various scenarios." author: CloudWeGo Team --- + ## From Development to Open Source Transition + Many researchers and practitioners may have just learned about CloudWeGo, so let's first introduce the relationship between CloudWeGo and [Kitex](https://github.com/cloudwego/kitex). ## CloudWeGo and Kitex -Kitex is CloudWeGo's first open-source microservice framework, designed to empower developers in building high-performance and extensible microservices using Golang. Kitex encompasses the entire stack, including the network library, serialization library, and framework implementation, making it a comprehensive self-developed RPC framework. + +Kitex is CloudWeGo's first open-source microservice framework, designed to empower developers in building high-performance and extensible microservices using Golang. Kitex encompasses the entire stack, including the network library, serialization library, and framework implementation, making it a comprehensive self-developed RPC framework. One notable feature of Kitex is its support for the gRPC protocol. Leveraging the official gRPC source code, Kitex optimizes the gRPC implementation, resulting in superior performance compared to the official gRPC framework. This sets Kitex apart from other Golang frameworks that offer open-source support for the gRPC protocol. Developers seeking both gRPC functionality and high-performance capabilities will find Kitex to be an excellent choice. @@ -21,6 +25,7 @@ Based on feedback from the community, there have been discussions surrounding wh ![image](/img/blog/Kitex_architecture_explained_en/2.png) ## Kitex Development History + In 2014, ByteDance began adopting Golang as a programming language. 
The internal services of ByteDance were established in 2015, where the Thrift protocol was chosen for RPC (Remote Procedure Call) scenarios, and an internal RPC framework was supported. In 2016, the first Golang RPC framework called Kite was officially launched. During the initial stages of rapid company growth, the primary focus is on quickly implementing requirements and addressing relatively simple scenarios. Therefore, there may not be extensive considerations in the design process. This approach is reasonable since the exploration phase lacks complete clarity on which scenarios will require support, and excessive consideration can lead to over-design issues. As business scenarios became more complex, the demand for diversified functionalities increased, resulting in a rise in the number of access services and calls each year. Kite, the initial Golang RPC framework, eventually proved inadequate to support subsequent iterations. Recognizing this, a new project called Kitex was initiated in 2019, following over three years of online service. The official version of Kitex was released in early 2020, and by the end of the same year, over 10,000 services within Byte were connected to Kitex, showcasing its widespread adoption. @@ -50,6 +55,7 @@ After successfully open-sourcing the gopkg library, we made necessary code adjus By providing insights into Kitex's development and open-source history, our aim is to address concerns that external developers may have regarding whether Kitex is a KPI project. We want to assure them that Kitex is a community-driven open-source project backed by our commitment to stability, compatibility, and continuous improvement. ### The Value of Open Source + Towards the end of the first part, let's briefly discuss the value that open source brings to us. Although Kitex was not initially developed solely for open source purposes, its implementation has been oriented towards open source from the start. 
**Kitex** itself is a project that has undergone extensive internal implementation within our organization. By open sourcing Kitex, our aim is to enable more users to swiftly build microservices internally. At the same time, open source allows us to gather valuable feedback from communities and enterprises. It also attracts external developers to contribute their expertise and insights. This collective engagement helps drive the evolution of Kitex towards supporting multiple scenarios and enriching its capabilities, making it applicable to a wider range of contexts and organizations. @@ -58,27 +64,35 @@ This symbiotic process of open source fosters a positive cycle of mutual benefit ![image](/img/blog/Kitex_architecture_explained_en/6.png) ## A Year-long Review of Open Source Changes + ### Framework Metrics + Before delving into the one-year open source changes of Kitex, let us first discuss the key metrics that should be considered when choosing a framework. ### Scalability + A framework's scalability is crucial in determining its suitability for different platforms. If a framework is tightly coupled with internal capabilities and cannot be easily transplanted or expanded to support various scenarios, it may present challenges when used externally. ### Usability + The ease of use of a framework can be evaluated from two perspectives. Firstly, for business developers, a framework that requires meticulous attention to its internal details may not be suitable for teams with high research and development efficiency requirements. Secondly, for framework-oriented secondary developers who provide custom support, a framework with excessive expansion capabilities or insufficient scalability may impose limitations and high expansion costs. ### Richness of Functions + While a framework can be customized based on extensibility, it is important to consider that not all developers have the capacity for extensive custom development. 
An ideal framework should offer a range of options for different expansion capabilities, allowing developers to select and combine them according to their underlying infrastructure and specific environment. ### High Performance + While the preceding three points are crucial considerations during the initial framework selection, as service scale and resource consumption increase, performance becomes an indispensable factor. It is imperative to prioritize performance when choosing a framework to avoid future issues such as the need for framework replacement or forced customized maintenance. Regarding the measurement indicators mentioned above, Kitex may not have achieved perfection in all areas, but these four elements have been carefully considered during its design and implementation. We are committed to ensuring a well-rounded framework that addresses these aspects without compromising on any one of them. ## Features + The following is an overview of several significant functional features that have been introduced in Kitex's open source journey over the past year. ### Proxyless + Proxyless is a feature in Kitex that caters to open source scenarios. During the initial stages of Kitex's open source release, there were internal discussions on whether to support xDS integration with [Istio](https://github.com/istio/istio). For external users, leveraging Istio allows for the quick establishment of a basic microservices architecture, resolving issues such as service discovery, traffic routing, and configuration delivery. However, utilizing the complete Istio solution necessitates the introduction of Envoy, which can increase operational and maintenance costs. Moreover, using the official Envoy solution directly may result in performance degradation, additional CPU overhead, and increased latency. 
If Kitex can directly connect to Istio, users would be able to benefit from some of Istio's capabilities while avoiding the performance loss, deployment complexity, and maintenance costs associated with Envoy. However, in the early days of open source, we did not encounter clear user demands, so we did not provide high-quality support for this. @@ -87,6 +101,7 @@ If Kitex can directly connect to Istio, users would be able to benefit from some Later on, the gRPC team also introduced Proxyless support, and Istio officials adopted Proxyless as a recommended approach for Istio usage. Kitex has now implemented support for Proxyless, primarily focusing on service discovery integration. The extensions supported by xDS have been open sourced separately in the [kitex-contrib/xds](https://github.com/kitex-contrib/xds) library and will undergo further enhancements in the future. To learn how to use Kitex to connect with Istio, please refer to the [README](https://github.com/istio/istio/blob/master/README.md) documentation. ### JSON and Protobuf generalized Call Support + Initially, Kitex provided support for HTTP generalization in gateway scenarios, as well as Map and binary generalization for common service scenarios. However, after open sourcing Kitex, user feedback highlighted the need for JSON and Protobuf generalization, leading to their subsequent implementation. The generalization of Protobuf is also used in API gateway scenarios. While the original data format for HTTP generalization is JSON, the serialization of JSON can be bulky and inefficient, which negatively impacts performance. As a result, many mobile interfaces opt to transmit data using Protobuf due to its more compact representation. To address this demand, Kitex now includes support for Protobuf generalization. 
@@ -97,6 +112,7 @@ Currently, Kitex's generalization primarily focuses on the back-end Thrift servi Now, you may wonder why the generalization is implemented on the calling side instead of the server side. Typically, when we think of generalization, we imagine the server parsing and processing the generalized request, with the caller providing a corresponding generalized client. However, generalization comes with a certain cost, making it less suitable for regular RPC scenarios. Moreover, generalization is meant for all back-end services, including those written in different languages like Golang, Java, C++, Python, Rust, and more. If every language framework had to support generalization, the cost would be significantly high. Additionally, achieving convergence across different language frameworks is a lengthy process. Considering these factors, Kitex supports generalization on the calling side. This approach allows for greater flexibility and enables users to take advantage of generalization selectively based on their specific needs. ### Enhanced Retry Capability + When Kitex was open sourced last year, it already supported the retry function. Initially, there were two types of retries available: timeout retry and Backup Request. For timeout retry, only the timeout exception was retried. However, to further improve the success rate of requests, users expressed the need to retry other exceptions or based on specific user-defined status codes. It became evident that supporting only timeout retry was insufficient to meet user requirements. In response, Kitex introduced retries with specified results. Users can now specify other exceptions or a particular type of response for which they want retries, and the framework will retry according to the specified results. 
@@ -105,15 +121,18 @@ The example below illustrates the usage of request granularity retry configurati ![image](/img/blog/Kitex_architecture_explained_en/9.png) ### Thrift Validator + Thrift-gen-validator is a tool plug-in for Thriftgo, that enhances the code generation process. It allows users to describe and enforce constraints on the generated `struct`'s `IsValid()` error method based on annotations defined in the Thrift IDL. This ensures the legality of field values. Usually when making an RPC call, the user may verify the validity of some fields. If the user directly writes these verification codes, the investment cost will be high. To address this, we provide annotation support. As long as users define annotations in IDL according to the specified format, Kitex can help users generate verification code. The example below demonstrates the usage of code generation commands and an IDL annotation definition. By specifying the Thrift Validator plugin during code generation, our tool will parse the annotations and generate the required validation code. We are also currently contributing the Thrift Validator functionality to Apache Thrift. ![image](/img/blog/Kitex_architecture_explained_en/10.png) ## Performance Optimization + After highlighting the important functional features, let's move on to discussing several performance optimization features. ### Thrift High-Performance Codec + [Frugal](https://github.com/cloudwego/frugal) is a dynamic Thrift codec that offers high-performance capabilities by leveraging Just-in-Time (JIT) compilation, eliminating the need for code generation. While we have already optimized the official Thrift codec and introduced FastThrift as part of our pre-open source optimization efforts, we wanted to further enhance performance by incorporating the design principles from our open source high-performance JSON library, Sonic. As a result, we have implemented the Thrift JIT codec in Frugal. 
The table below illustrates a performance comparison between Frugal, combined with Kitex, and FastThrift. ![image](/img/blog/Kitex_architecture_explained_en/frugal.png) @@ -123,6 +142,7 @@ It is evident that Frugal offers superior RPC performance in most scenarios. In To learn how to use Frugal in conjunction with Kitex, you can refer to the repository's [Readme](https://github.com/cloudwego/frugal#readme) file. users can also utilize Frugal as a standalone high-performance codec for Thrift. In the future, [Kitex](https://github.com/cloudwego/kitex) may consider incorporating Frugal as the default codec option. ### Protobuf High-Performance Codec + We primarily focused on supporting Thrift internally; however, we recognized that external users are more inclined towards using Protobuf or gRPC after the open-source release. Consequently, taking inspiration from Kitex FastThrift's optimization approach, we re-implemented the generated code for Protobuf. Starting from version v0.4.0, if users employ Kitex tools to generate Protobuf code, the default generation will include Fastpb codec code. Furthermore, when initiating RPC calls, Kitex will also utilize [Fastpb](https://github.com/cloudwego/fastpb) as the default serialization option. @@ -133,7 +153,7 @@ The figure below illustrates a performance comparison between Fastpb and the off In the early days of open sourcing Kitex, our focus on stability and performance optimization for gRPC was relatively limited, as there were fewer internal use cases. However, after receiving feedback from numerous external users, we made dedicated efforts to address issues and optimize the performance of gRPC. In the middle of this year, we officially contributed these optimizations to the open-source library, which was released in version v0.4.0. -The figure below provides a comparison of unary request throughput between Kitex-gRPC and the official gRPC framework before and after optimization. 
On the left side, you can see the throughput comparison before optimization. +The figure below provides a comparison of unary request throughput between Kitex-gRPC and the official gRPC framework before and after optimization. On the left side, you can see the throughput comparison before optimization. The figure below provides a comparison of unary request throughput between Kitex-gRPC and the official gRPC framework before and after optimization. On the left side, you can see the throughput comparison before optimization. At relatively low concurrency, Kitex's throughput does not exhibit an advantage over the official gRPC framework. However, when using Fastpb, Kitex's throughput performance improves compared to the pre-optimization stage. Despite this improvement, the low-concurrency throughput is still lower than that of the official gRPC framework. On the right side of the figure, you can observe the throughput comparison after optimization. The throughput has increased by 46% - 70% compared to the pre-optimization stage, and when compared to the official gRPC framework, the throughput has increased by 51% - 70%. ![image](/img/blog/Kitex_architecture_explained_en/13.png) @@ -155,32 +175,37 @@ Since its open-source release, we have been thrilled by the enthusiastic respons While we acknowledge the need for further enrichment in our docking capabilities, we are proud to state that, in conjunction with our existing support, Kitex already possesses the necessary features to facilitate the construction of microservice architectures for external users. ![image](/img/blog/Kitex_architecture_explained_en/16.png) - We extend our heartfelt appreciation to the developers who have actively contributed to the growth of the CloudWeGo community. To explore the extensive ecosystem surrounding Kitex, we invite you to visit the [kitex-contrib](https://github.com/kitex-contrib) repository in our open-source warehouse. 
### Working with External Companies + Our primary goal with the open-source release of Kitex was to assist external companies in swiftly establishing enterprise-level cloud-native architectures. Since then, we have been delighted to receive interest and engagement from notable organizations such as Semir, Huaxing Securities, Tanwan Games, and Heduo Technology. Their valuable feedback and specific requirements have shed light on unique usage scenarios and challenges distinct from our internal use cases, necessitating our attention, support, and optimization efforts. We are thrilled to witness the successful application of Kitex in these enterprise environments. In fact, during the CloudWeGo Meetup held on June 25th of this year, R&D professionals from [Semir](https://mp.weixin.qq.com/s/JAurW4P2E3NIduFaVY6jew) and [Huaxing Securities](https://mp.weixin.qq.com/s/QqGdzp-7rTdlxedy6bsXiw) shared their internal experiences and practical use cases, further validating the effectiveness and value of Kitex in real-world scenarios. ![image](/img/blog/Kitex_architecture_explained_en/17.png) -In addition to the above companies, we have also provided consultation to private inquiries from various organizations regarding usage issues. We are very grateful for the support and feedback from these corporate users. As mentioned earlier, gathering feedback from the community and enterprises plays a crucial role in driving the evolution of Kitex to support a wide range of scenarios. If enterprise users have any specific needs or requirements, we encourage them to reach out to us. +In addition to the above companies, we have also provided consultation to private inquiries from various organizations regarding usage issues. We are very grateful for the support and feedback from these corporate users. As mentioned earlier, gathering feedback from the community and enterprises plays a crucial role in driving the evolution of Kitex to support a wide range of scenarios. 
If enterprise users have any specific needs or requirements, we encourage them to reach out to us. ## How to use Kitex to Integrate with Existing Infrastructure -Here is a brief introduction on how to use Kitex to integrate with your internal infrastructure. Let's take ByteDance as an example, there are extensions in the open source library within the internal warehouse. These extensions are designed to integrate internal capabilities specific to ByteDance. Within the BytedSuite, Kitex can be initialized to cater to various scenarios. Users simply need to add an option configuration while constructing the Client and Server components to achieve seamless integration. To ensure a hassle-free experience, we have incorporated this configuration within the generated scaffolding code. This means that users no longer need to specifically focus on integrating internal capabilities. Furthermore, we plan to share details about how this configuration is embedded in the generated code. By doing so, secondary developers working with external frameworks will be able to provide integration capabilities to business development teams in a similar manner. + +Here is a brief introduction on how to use Kitex to integrate with your internal infrastructure. Let's take ByteDance as an example, there are extensions in the open source library within the internal warehouse. These extensions are designed to integrate internal capabilities specific to ByteDance. Within the BytedSuite, Kitex can be initialized to cater to various scenarios. Users simply need to add an option configuration while constructing the Client and Server components to achieve seamless integration. To ensure a hassle-free experience, we have incorporated this configuration within the generated scaffolding code. This means that users no longer need to specifically focus on integrating internal capabilities. Furthermore, we plan to share details about how this configuration is embedded in the generated code. 
By doing so, secondary developers working with external frameworks will be able to provide integration capabilities to business development teams in a similar manner. ![image](/img/blog/Kitex_architecture_explained_en/18.png) ## Summary and Future Perspectives + ### Summarize + This blog introduces the following key points: + 1. The transition of Kitex from an internally used framework to an open-source framework while ensuring compatibility between internal and external versions. 2. Overview of important functional features and performance optimizations released during the past year of open source. 3. The origination and development of Kitex's ecosystem with contributions from the community, examples of enterprise adoption, and elegant integration of internal capabilities using Kitex. ### Future Perspectives + 1. Collaborate with the community to further enrich the ecosystem and foster active participation from developers. 2. Enhance the usability of Kitex by incorporating engineering practices and providing greater convenience for microservice developers. 3. Continuously improve the BDThrift ecosystem and optimize support for Protobuf and gRPC. 4. Explore and implement additional feature support and open sourcing, such as ShmIPC (shared memory IPC), QUIC (Quick UDP Internet Connections), and generalization for Protobuf. -By pursuing these goals, Kitex aims to meet the evolving needs of users and further strengthen its position as a reliable and efficient framework for building cloud-native architectures. \ No newline at end of file +By pursuing these goals, Kitex aims to meet the evolving needs of users and further strengthen its position as a reliable and efficient framework for building cloud-native architectures. 
diff --git a/content/en/blog/news/Hertz_Benchmark/index.md b/content/en/blog/news/Hertz_Benchmark/index.md index 66597674ff..456f03187a 100644 --- a/content/en/blog/news/Hertz_Benchmark/index.md +++ b/content/en/blog/news/Hertz_Benchmark/index.md @@ -1,6 +1,7 @@ --- date: 2023-02-24 title: "Getting Started with Hertz: Performance Testing Guide" +projects: ["Hertz"] linkTitle: "Getting Started with Hertz: Performance Testing Guide" keywords: ["CloudWeGo", "Hertz", "HTTP framework", "performance testing"] description: "The purpose of this blog is to share the scenarios and technical issues that developers need to know when they need to conduct load testing on Hertz. diff --git a/content/en/blog/news/Hertz_Open_Source/index.md b/content/en/blog/news/Hertz_Open_Source/index.md index 25840e5997..db47fa7b3c 100644 --- a/content/en/blog/news/Hertz_Open_Source/index.md +++ b/content/en/blog/news/Hertz_Open_Source/index.md @@ -1,6 +1,7 @@ --- date: 2022-06-21 title: "Hertz, an Ultra Large Scale Enterprise-Level Microservice HTTP Framework, is Now Officially Open Source!" +projects: ["Hertz"] linkTitle: "Hertz, an Ultra Large Scale Enterprise-Level Microservice HTTP Framework, is Now Officially Open Source!" keywords: ["CloudWeGo", "http framework", "large scale high performance", "Hertz", "ByteDance Open Source", "open source"] description: "This article introduces Hertz, the official open source ultra-large-scale enterprise-level microservice HTTP framework developed by ByteDance" diff --git a/content/en/blog/news/Introducing CloudWeGo/index.md b/content/en/blog/news/Introducing CloudWeGo/index.md index 0974d4b08e..781942ad27 100644 --- a/content/en/blog/news/Introducing CloudWeGo/index.md +++ b/content/en/blog/news/Introducing CloudWeGo/index.md @@ -1,15 +1,19 @@ --- date: 2023-06-15 title: "CloudWeGo: A leading practice for building enterprise cloud native middleware!" 
+projects: ["CloudWeGo"] linkTitle: "CloudWeGo: A leading practice for building enterprise cloud native middleware!" keywords: ["CloudWeGo", "middleware", "Kitex", "microservice framework", "ByteDance Open Source", "open source", "cloud native"] description: "This article provides an overview of CloudWeGo" author: Vini Jaiswal --- + ## CloudWeGo Overview -[CloudWeGo](https://www.cloudwego.io/) is a set of microservices middleware developed by ByteDance that can be used to quickly build enterprise-class cloud-native architectures. It is a collection of high-performance, high-extensible, and highly-reliable projects that are focused on microservices communication and governance. It contains many components, including the RPC framework [Kitex](https://github.com/cloudwego/kitex), the HTTP framework [Hertz](https://github.com/cloudwego/hertz), the basic network library Netpoll, inter-process communication library [shmipc](https://github.com/cloudwego/shmipc-go), Rust-based RPC framework [Volo](https://github.com/cloudwego/volo) etc. + +[CloudWeGo](https://www.cloudwego.io/) is a set of microservices middleware developed by ByteDance that can be used to quickly build enterprise-class cloud-native architectures. It is a collection of high-performance, high-extensible, and highly-reliable projects that are focused on microservices communication and governance. It contains many components, including the RPC framework [Kitex](https://github.com/cloudwego/kitex), the HTTP framework [Hertz](https://github.com/cloudwego/hertz), the basic network library Netpoll, inter-process communication library [shmipc](https://github.com/cloudwego/shmipc-go), Rust-based RPC framework [Volo](https://github.com/cloudwego/volo) etc. ## CloudWeGo Background + ByteDance uses Golang as its main development language, and supports the reliable communication of tens of thousands of Golang microservices. 
With our experience in microservices having undergone a massive traffic, we decided to offer open source software in order to enrich the community’s ecology and launched CloudWeGo in September 2021. CloudWeGo is not only an external open source project, but also a real ultra-large-scale enterprise-level project. We are looking forward to enriching the Golang product system of the cloud native community through CloudWeGo and helping other companies to build cloud-native architectures in a rapid and convenient way. We also hope to attract developers in the open source community, to maintain and improve this project together, provide support for multiple scenarios, and enrich product capabilities. Because the projects under CloudWeGo depend on many internal basic tool libraries, we also open sourced the basic Golang tool libraries used internally, and maintain them in [bytedance/gopkg](https://github.com/bytedance/gopkg). Another language that we are committed to advancing is the Rust language that delivers exceptional performance, safety, and low-level control capabilities. Through our open-source projects and contributions, ByteDance aims to provide developers, enterprises, and Rustaceans with robust support in developing RPC microservices and building cloud-native distributed systems. ByteDance's contribution includes the development of Volo, a lightweight, high-performance, scalable, and user-friendly Rust RPC framework. Leveraging the latest features of Rust, Volo showcases exceptional performance and efficiency. ByteDance has extensively used Volo within its own infrastructure, implementing multiple business and foundational components, surpassing expectations and highlighting its superiority compared to similar solutions written in other languages. Another project is Monoio, a thread-per-core Rust runtime with io_uring/epoll/kqueue. 
Monoio is designed to offer maximum efficiency and performance by leveraging advanced features of Rust and a unique IO abstraction that minimizes copying. Its inclusion within CloudWeGo ensures robust support for various scenarios and enhances the overall capabilities of the project. @@ -17,59 +21,77 @@ Another language that we are committed to advancing is the Rust language that de ByteDance's dedication to Rust extends beyond Volo and Monoio. Through our commitment to simplicity and user-friendly tools, such as the Volo command-line tool, ByteDance actively contributes to lowering the barriers for developers to adopt Rust and leverage its full potential. ## Key Features + Some of the key features of CloudWeGo include: -### **High performance**: + +### **High performance**: + Highly performant nature of CloudWeGo projects stems from our implementation of asynchronous RPC, streaming RPC, event-driven programming, and support for protocols like HTTP/2. These design choices and features work together to deliver superior speed, responsiveness, and efficiency, enabling CloudWeGo projects to handle demanding workloads and achieve excellent performance benchmarks. -### **High extensibility**: + +### **High extensibility**: + CloudWeGo is designed to allow users to customize and expand its functionality according to their specific requirements. CloudWeGo achieves this by providing a modular or layered design that offers a set of interfaces and default implementation options. By utilizing a modular design, CloudWeGo, as seen in Kitex and Hertz, allows users to extend or inject their own implementations into the framework. This means that developers have the flexibility to tailor the behavior of the framework to suit their needs. They can replace or enhance default implementations with their own custom implementations, enabling them to adapt CloudWeGo to specific use cases or integrate seamlessly with other libraries and tools. 
-### **High reliability**: -CloudWeGo prioritizes stability and dependability in its projects, providing a reliable framework for developers and enterprises. Through rigorous quality assurance, including code review and testing, potential issues are identified and addressed early on before they impact production environments. CloudWeGo as a whole emphasizes the prevention of any potential losses or disruptions. This is achieved through careful PR integration, extensive testing, and continuous monitoring. CloudWeGo optimizes projects to handle high workloads, ensuring stability and reliability even under heavy load. Feedback from the community helps drive improvements and prompt issue resolution. By adhering to strict quality standards, CloudWeGo projects strive to deliver stable and reliable software. -### **Microservices communication and governance**: + +### **High reliability**: + +CloudWeGo prioritizes stability and dependability in its projects, providing a reliable framework for developers and enterprises. Through rigorous quality assurance, including code review and testing, potential issues are identified and addressed early on before they impact production environments. CloudWeGo as a whole emphasizes the prevention of any potential losses or disruptions. This is achieved through careful PR integration, extensive testing, and continuous monitoring. CloudWeGo optimizes projects to handle high workloads, ensuring stability and reliability even under heavy load. Feedback from the community helps drive improvements and prompt issue resolution. By adhering to strict quality standards, CloudWeGo projects strive to deliver stable and reliable software. + +### **Microservices communication and governance**: + CloudWeGo's Governance feature encompasses service governance modules such as service registry, discovery, load balancing, circuit breaker, rate limiting, retry, monitoring, tracing, logging, and diagnosis. 
These modules enhance the management, control, and reliability of services within the CloudWeGo framework. They enable dynamic service discovery, load distribution, fault tolerance, performance optimization, and comprehensive monitoring and diagnostics. The Governance feature ensures efficient and reliable service operations in the CloudWeGo ecosystem. Here are some of the benefits of using CloudWeGo: + - Speed: CloudWeGo offers the ability to accelerate application development by providing a set of pre-built components and libraries. These components and libraries provide essential functionalities, such as networking, database integration, security, and more. They are designed to be easily integrated into applications, reducing the need for building complex functionalities from scratch. With CloudWeGo's speedy development ecosystem, developers can focus on their application's core logic, leverage existing components, and deliver robust solutions in a shorter timeframe. This enables faster time-to-market, increased development efficiency, and ultimately enhances the overall speed of application development. - Cost savings: By adopting CloudWeGo and its projects such as Kitex, Hertz, Sonic, and others, users can benefit from significant cost savings. This is achieved through reduced CPU and memory consumption compared to older frameworks or similar projects. CloudWeGo's modern framework is designed to be highly optimized and efficient, resulting in minimized software development overhead. Internally at ByteDance, we moved to using the modern high-performance framework that has proven instrumental in saving substantial resources. With CloudWeGo, users can optimize their resource allocation and achieve cost efficiency while leveraging the powerful capabilities and features provided by the framework. - Security: CloudWeGo prioritizes security, offering a range of features and measures to ensure application security. 
The framework incorporates secure design principles, implementing industry best practices. It provides built-in authentication and authorization mechanisms for secure user access control. By leveraging the Rust programming language, CloudWeGo benefits from its inherent security advantages, such as strong type safety and memory safety, reducing the risk of common vulnerabilities. The open-source nature of CloudWeGo allows for community contributions and wider security audits, ensuring continuous improvement and prompt vulnerability mitigation. With CloudWeGo, developers can build applications with confidence, knowing that security is prioritized at every level of the framework. ## Use Cases + CloudWeGo can be used to build a variety of applications, including microservices-based applications, cloud-native applications, real-time applications, IoT applications, and other applications. Some of the applications include: + - Microservices-based applications: CloudWeGo provides a comprehensive set of features for microservices communication and governance, such as service discovery, routing, and orchestration. This makes it easy to build and manage microservices-based applications. - Cloud-native applications: CloudWeGo is designed to be used in cloud-native environments. It can be used to build applications that are scalable, reliable, and secure. - Real-time applications: CloudWeGo supports streaming and asynchronous messaging. This makes it a good choice for building real-time applications, such as chat applications and streaming media applications. - Other applications: CloudWeGo can be used to build a variety of other applications, such as web applications, mobile applications, gaming applications and enterprise applications. Let's explore some of the industry adoption: - - **Gaming**: CloudWeGo offers several benefits for the gaming industry. 
It enables game developers to scale their infrastructure to handle increased player demands, optimize performance, and simplify their business logic through service splitting. With components like current limiting, monitoring, and service registration/discovery, CloudWeGo ensures efficient resource utilization, enhanced responsiveness, and seamless coordination between game components. Additionally, the integration with OpenTelemetry provides valuable monitoring and diagnostics capabilities for developers to optimize their game services and deliver an exceptional gaming experience. Overall, CloudWeGo empowers the gaming industry by providing a scalable, efficient, and streamlined infrastructure for game development and operations. - - **Security**: In the security industry, CloudWeGo's adoption, specifically through the Kitex framework, brings significant benefits. Organizations can establish observability systems to monitor and analyze the performance of security services. This enables the identification of potential issues and ensures smooth operation. CloudWeGo also offers solutions for service stress testing, allowing organizations to optimize performance and ensure stability during high-load scenarios. Additionally, CloudWeGo addresses challenges related to different connection types within Kubernetes clusters, enabling efficient and secure communication between services. The framework provides specific solutions tailored to the security industry, helping organizations overcome obstacles and optimize their security infrastructure effectively. - - **E-commerce**: CloudWeGo's Kitex framework offers a powerful solution for e-commerce companies dealing with high-concurrency and high-performance challenges. By integrating Kitex with technologies like Istio, businesses can significantly improve their ability to handle peak traffic and ensure synchronized order processing. 
This enables efficient communication with multiple e-commerce platforms and prevents issues like overselling. The adoption of CloudWeGo and Kitex, along with the use of a cloud-native service mesh like Istio, enhances the overall performance, scalability, and reliability of e-commerce systems, providing businesses with a competitive edge in the rapidly evolving e-commerce industry. + - **Gaming**: CloudWeGo offers several benefits for the gaming industry. It enables game developers to scale their infrastructure to handle increased player demands, optimize performance, and simplify their business logic through service splitting. With components like current limiting, monitoring, and service registration/discovery, CloudWeGo ensures efficient resource utilization, enhanced responsiveness, and seamless coordination between game components. Additionally, the integration with OpenTelemetry provides valuable monitoring and diagnostics capabilities for developers to optimize their game services and deliver an exceptional gaming experience. Overall, CloudWeGo empowers the gaming industry by providing a scalable, efficient, and streamlined infrastructure for game development and operations. + - **Security**: In the security industry, CloudWeGo's adoption, specifically through the Kitex framework, brings significant benefits. Organizations can establish observability systems to monitor and analyze the performance of security services. This enables the identification of potential issues and ensures smooth operation. CloudWeGo also offers solutions for service stress testing, allowing organizations to optimize performance and ensure stability during high-load scenarios. Additionally, CloudWeGo addresses challenges related to different connection types within Kubernetes clusters, enabling efficient and secure communication between services. 
The framework provides specific solutions tailored to the security industry, helping organizations overcome obstacles and optimize their security infrastructure effectively. + - **E-commerce**: CloudWeGo's Kitex framework offers a powerful solution for e-commerce companies dealing with high-concurrency and high-performance challenges. By integrating Kitex with technologies like Istio, businesses can significantly improve their ability to handle peak traffic and ensure synchronized order processing. This enables efficient communication with multiple e-commerce platforms and prevents issues like overselling. The adoption of CloudWeGo and Kitex, along with the use of a cloud-native service mesh like Istio, enhances the overall performance, scalability, and reliability of e-commerce systems, providing businesses with a competitive edge in the rapidly evolving e-commerce industry. ## Getting Started -To begin your journey with CloudWeGo projects, you can refer to our [comprehensive documentation](https://www.cloudwego.io/docs/), which provides step-by-step instructions. Additionally, we regularly publish insightful blogs on various topics, including the latest innovations in [Kitex](https://github.com/cloudwego/kitex), [Hertz](https://github.com/cloudwego/hertz), [Monoio](https://www.cloudwego.io/blog/2023/04/17/introducing-monoio-a-high-performance-rust-runtime-based-on-io-uring/), [Shmipc](https://github.com/cloudwego/shmipc-go), and [Volo](https://github.com/cloudwego/volo). We also publish best practices blogs and have a dedicated blog that explores best practices for using [Kitex without a proxy](https://www.cncf.io/blog/2023/01/11/kitex-proxyless-practice-traffic-lane-implementation-with-istio-and-opentelemetry/). These resources serve as valuable references to help you make the most of CloudWeGo's offerings and stay up-to-date with the latest developments in the ecosystem. 
+To begin your journey with CloudWeGo projects, you can refer to our [comprehensive documentation](https://www.cloudwego.io/docs/), which provides step-by-step instructions. Additionally, we regularly publish insightful blogs on various topics, including the latest innovations in [Kitex](https://github.com/cloudwego/kitex), [Hertz](https://github.com/cloudwego/hertz), [Monoio](https://www.cloudwego.io/blog/2023/04/17/introducing-monoio-a-high-performance-rust-runtime-based-on-io-uring/), [Shmipc](https://github.com/cloudwego/shmipc-go), and [Volo](https://github.com/cloudwego/volo). We also publish best practices blogs and have a dedicated blog that explores best practices for using [Kitex without a proxy](https://www.cncf.io/blog/2023/01/11/kitex-proxyless-practice-traffic-lane-implementation-with-istio-and-opentelemetry/). These resources serve as valuable references to help you make the most of CloudWeGo's offerings and stay up-to-date with the latest developments in the ecosystem. ## Contributing + The CloudWeGo project is subdivided into subprojects under: + - [Kitex](https://github.com/cloudwego/kitex) (Kitex & Kitex ecosystem & kitex-contrib) - [Hertz](https://github.com/cloudwego/hertz) (Hertz & Hertz ecosystem & hertz-contrib) - [Netpoll](https://github.com/cloudwego/netpoll) (Netpoll & Netpoll ecosystem) - [Shmipc](https://github.com/cloudwego/shmipc-go) (shmipc-spec & shmipc-go) - [Volo](https://github.com/cloudwego/volo) (Volo & Volo ecosystem & volo-rs & Motore & Pilota) - [Website & Docs](https://github.com/cloudwego/community) (cloudwego.github.io & community) + Kitex is equipped with built-in governance strategies and expansion interfaces for frictionless integration into the microservice system. Hertz is a Go HTTP framework with high-performance and strong-extensibility for building micro-services. Netpoll is aimed at scenarios that demand high performance on RPC scenarios. 
Shmipc is a high performance inter-process communication library, built on Linux's shared memory technology and uses unix or tcp connection to do process synchronization and finally implements zero copy communication across inter-processes. Volo is a high-performance and strong-extensibility Rust RPC framework that helps developers build microservices. Each component of CloudWeGo can be used separately. -We welcome you to [contribute](https://github.com/cloudwego/community/blob/main/CONTRIBUTING.md) by submitting issues and PRs to build CloudWeGo together. Contributing to CloudWeGo involves various roles and responsibilities within the project's GitHub organization. The roles include Member, Committer, Reviewer, Approver, and Maintainer. Members are active contributors in the community and are expected to participate in discussions and make multiple contributions to the project. Committers are active code contributors and play a role in reviewing and approving code contributions. Reviewers have expertise in the codebase and provide feedback on contributions. Approvers review and approve code contributions, ensuring their holistic acceptance. Maintainers are responsible for setting technical direction, making design decisions, and ensuring the overall health of a subproject. The responsibilities and privileges of each role vary, but they all contribute to the growth and success of CloudWeGo. The responsibilities of contributor roles in CloudWeGo are outlined in our [community membership document](https://github.com/cloudwego/community/blob/main/COMMUNITY_MEMBERSHIP.md). +We welcome you to [contribute](https://github.com/cloudwego/community/blob/main/CONTRIBUTING.md) by submitting issues and PRs to build CloudWeGo together. Contributing to CloudWeGo involves various roles and responsibilities within the project's GitHub organization. The roles include Member, Committer, Reviewer, Approver, and Maintainer. 
Members are active contributors in the community and are expected to participate in discussions and make multiple contributions to the project. Committers are active code contributors and play a role in reviewing and approving code contributions. Reviewers have expertise in the codebase and provide feedback on contributions. Approvers review and approve code contributions, ensuring their holistic acceptance. Maintainers are responsible for setting technical direction, making design decisions, and ensuring the overall health of a subproject. The responsibilities and privileges of each role vary, but they all contribute to the growth and success of CloudWeGo. The responsibilities of contributor roles in CloudWeGo are outlined in our [community membership document](https://github.com/cloudwego/community/blob/main/COMMUNITY_MEMBERSHIP.md). We are excited for more developers to join, and also look forward to CloudWeGo helping more and more companies quickly build cloud-native architectures. Feel free to raise an issue in [Github](https://github.com/cloudwego) if you have any questions. Furthermore, you can join our [slack channel](https://cloudwego.slack.com/join/shared_invite/zt-tmcbzewn-UjXMF3ZQsPhl7W3tEDZboA#/shared-invite/email) to keep updated with the latest news. ## Community and Support + At ByteDance, we try to make the projects friendly to external users, and our internal projects will also use this open source project as a library for iterative development. CloudWeGo follows a key principle of maintaining one set of code internally and externally, iterating them as a whole. It has been gratifying to see Kitex gain 6000 stars, Hertz gain 3800+ stars and Netpoll gain 3600+ stars since its launch. More about all these projects can be found under our [cloudwego github repository](https://github.com/cloudwego). ### Maintenance + A complete microservice system builds upon a basic cloud ecosystem. 
No matter how the microservices are developed; based on the public cloud, a private cloud or your own infrastructure, additional services (including service governance platform, monitoring, tracing, service registry and discovery, configuration and service mesh etc) and some customized standards are needed to provide better service governance. At Bytedance, we have complete internal services to support the microservice system, but these services can not be open sourced in the short term. So, how will CloudWeGo maintain a set of codes internally and externally, and iterate them as a whole? Projects in CloudWeGo are not coupled with internal ecosystem. For example, Netpoll is directly migrated to open source libraries, and our internal dependencies are adjusted to open source libraries. Kitex’s code is split into two parts, including the core of Kitex which has been migrated to the open source library, and the encapsulated internal library which will provide transparent upgrades for internal users. For open source users who use Kitex, they can also extend Kitex and integrate Kitex into their own microservice system. We hope, and expect, that more developers will contribute their own extensions to [kitex-contrib](https://github.com/kitex-contrib), [hertz-contrib](https://github.com/hertz-contrib) and [volo-rs](https://github.com/volo-rs) providing help and convenience for more users. ## Conclusion + CloudWeGo is a rapidly growing project with a large and active community. It is a great choice for developers who are looking to build enterprise-class cloud native applications. If you are looking for a high-performance, extensible, and reliable middleware solution for your cloud native applications, then CloudWeGo is a great choice. 
-![image](/img/logo.png) \ No newline at end of file +![image](/img/logo.png) diff --git a/content/en/blog/news/Kitex_Proxyless_OpenTelemetry/index.md b/content/en/blog/news/Kitex_Proxyless_OpenTelemetry/index.md index 43d7843aca..34311c3288 100644 --- a/content/en/blog/news/Kitex_Proxyless_OpenTelemetry/index.md +++ b/content/en/blog/news/Kitex_Proxyless_OpenTelemetry/index.md @@ -1,6 +1,7 @@ --- date: 2022-12-20 title: "Kitex Proxyless Practice:Traffic Lane Implementation with Istio and OpenTelemetry" +projects: ["Kitex"] linkTitle: "Kitex Proxyless Practice:Traffic Lane Implementation with Istio and OpenTelemetry" keywords: ["CloudWeGo", "Proxyless", "Traffic Route", "Lane", "Bookinfo"] description: "This blog mainly introduces the realization of traffic routing based on Kitex Proxyless and the bookinfo demo rewrote with Kitex and Hertz. diff --git a/content/en/blog/news/Kitex_perf_optimize_practices/index.md b/content/en/blog/news/Kitex_perf_optimize_practices/index.md index 62a8140aa5..4bc80c50af 100644 --- a/content/en/blog/news/Kitex_perf_optimize_practices/index.md +++ b/content/en/blog/news/Kitex_perf_optimize_practices/index.md @@ -1,6 +1,7 @@ --- date: 2021-09-23 title: "Performance Optimization on Kitex" +projects: ["Kitex"] linkTitle: "Performance Optimization on Kitex" keywords: ["Kitex", "Optimization", "Netpoll", "Thrift", "Serialization"] description: "This blog introduces the performance optimization practice of Bytedance Go RPC framework Kitex, which includes Netpoll, Thrift, serialization and so on." 
diff --git a/content/en/blog/news/Kitex_performance_testing/index.md b/content/en/blog/news/Kitex_performance_testing/index.md index 7477c469dc..ad2b8d0afb 100644 --- a/content/en/blog/news/Kitex_performance_testing/index.md +++ b/content/en/blog/news/Kitex_performance_testing/index.md @@ -1,6 +1,7 @@ --- date: 2021-11-24 title: "Getting Started With Kitex's Practice: Performance Testing Guide" +projects: ["Kitex"] linkTitle: "Getting Started With Kitex's Practice: Performance Testing Guide" keywords: ["Kitex", "Performance Testing", "RPC"] description: "This blog describes how to use Kitex for performance testing and how to analyze the test results to help users tune Kitex with real RPC scenarios to better match business needs and maximize performance." diff --git a/content/en/blog/news/Monoio_Open_Source/index.md b/content/en/blog/news/Monoio_Open_Source/index.md index 4259ac5a36..62b76bd576 100644 --- a/content/en/blog/news/Monoio_Open_Source/index.md +++ b/content/en/blog/news/Monoio_Open_Source/index.md @@ -1,6 +1,7 @@ --- date: 2023-04-17 title: "Introducing Monoio: a high-performance Rust Runtime based on io-uring" +projects: ["Monoio"] linkTitle: "Introducing Monoio: a high-performance Rust Runtime based on io-uring" keywords: ["CloudWeGo", "Monoio", "io-uring", "Open Source", "Rust"] description: "This blog introduces the asynchronous mechanism of Rust, the design outline of Monoio, the comparison selection and application of Runtime, etc." @@ -8,31 +9,35 @@ author: CloudWeGo Rust Te --- ## Overview -Although Tokio is currently the 'de facto' standard for Rust asynchronous runtime, there is still some distance to go to achieve the ultimate performance of network middleware. + +Although Tokio is currently the 'de facto' standard for Rust asynchronous runtime, there is still some distance to go to achieve the ultimate performance of network middleware. 
In pursuit of this goal, the [CloudWeGo][CloudWeGo] Rust Team has explored providing asynchronous support for Rust based on io-uring and developed a universal gateway on this basis. This blog includes the following content: + 1. Introduction to Rust asynchronous runtime; 2. Design essentials of [Monoio][Monoio]; 3. Comparison and selection of runtime and application. ## Rust Asynchronous Mechanism -With the help of Rustc and LLVM, Rust can generate machine code that is efficient and secure enough. + +With the help of Rustc and LLVM, Rust can generate machine code that is efficient and secure enough. However, besides computing logic, an application often involves I/O, especially for network middleware, where I/O takes up a considerable proportion. -I/O operations require interaction with the operating system, and writing asynchronous programs is usually not a simple task. How does Rust solve these two problems? +I/O operations require interaction with the operating system, and writing asynchronous programs is usually not a simple task. How does Rust solve these two problems? For example, in C++, it is common to write callbacks, but we don't want to do this in Rust because it may encounter many lifetimes related issues. -Rust allows the implementation of a custom runtime to schedule tasks and execute syscalls, and provides unified interfaces such as Future. +Rust allows the implementation of a custom runtime to schedule tasks and execute syscalls, and provides unified interfaces such as Future. In addition, Rust provides built-in async-await syntax sugar to liberate programmers from callback programming. ![image](/img/blog/Monoio_Open_Source/1_2_en.png) ### Example -Let's start with a simple example to see how this system works. When downloading two files in parallel, you can start two threads in any language to download each file and then wait for the threads to finish execution. + +Let's start with a simple example to see how this system works. 
When downloading two files in parallel, you can start two threads in any language to download each file and then wait for the threads to finish execution. However, we don't want to start unnecessary threads just to wait for IO. If we need to wait for IO, we want the threads to do something else and only perform the IO operation when it's ready. -This event-driven triggering mechanism is often encountered in C++ in the form of callbacks. Callbacks interrupt our sequential logic, making the code less readable. +This event-driven triggering mechanism is often encountered in C++ in the form of callbacks. Callbacks interrupt our sequential logic, making the code less readable. Additionally, it's easy to encounter issues with the lifecycle of variables that callbacks depend on, such as releasing a variable referenced by a callback before the callback is executed. However, in Rust, you only need to create two tasks and wait for them to finish execution. @@ -41,8 +46,8 @@ However, in Rust, you only need to create two tasks and wait for them to finish In comparison to threads, asynchronous tasks are much more efficient in this example, but they don't significantly complicate the programming. -In the second example, let's mock an asynchronous function called `do_http` that directly returns 1. In reality, it could involve a series of asynchronous remote requests. -On top of that, we want to combine these asynchronous functions. Let's assume we make two requests and add up the results, and finally add 1. This is the sum function in this example. +In the second example, let's mock an asynchronous function called `do_http` that directly returns 1. In reality, it could involve a series of asynchronous remote requests. +On top of that, we want to combine these asynchronous functions. Let's assume we make two requests and add up the results, and finally add 1. This is the sum function in this example. 
Using the `async` and `await` syntax, we can easily nest these asynchronous functions. ```rust @@ -57,10 +62,11 @@ pub async fn sum() -> i32 { } ``` -This process is very similar to writing synchronous functions, which means it's more procedural programming rather than state-oriented programming. +This process is very similar to writing synchronous functions, which means it's more procedural programming rather than state-oriented programming. By using this mechanism, we can avoid the problem of writing a bunch of callbacks, bringing great convenience to programming. ### The Secret Behind Async/Await + Through these two examples, we can see how async is used in Rust and how convenient it is to write code with it. But what is the underlying principle behind it? ```rust @@ -79,7 +85,7 @@ pub async fn sum() -> i32 { The examples we just saw were written using Async and Await, which ultimately generates a structure that implements the Future trait. -Async and Await is actually syntactic sugar that can be expanded into Generator syntax at the HIR (High-level Intermediate Representation) stage. +Async and Await is actually syntactic sugar that can be expanded into Generator syntax at the HIR (High-level Intermediate Representation) stage. The Generator syntax is then further expanded by the compiler into a state machine at the MIR (Mid-level Intermediate Representation) stage. ![image](/img/blog/Monoio_Open_Source/5.png) @@ -101,16 +107,17 @@ pub enum Poll { ``` The Future describes the interface exposed by the state machine: + 1. Driving the state machine execution: The Poll method, as the name suggests, drives the execution of the state machine. Given a task, it triggers the task to perform state transitions. 2. Returning the execution result: 1. When encountering a blocking operation: Pending 2. When execution is completed: Ready + return value -From this, we can see that the essence of an asynchronous task is to implement the state machine of a Future. 
+From this, we can see that the essence of an asynchronous task is to implement the state machine of a Future. The program can manipulate it using the Poll method, which may indicate that it's currently encountering a blocking operation or that the task has completed and returned a result. -Since we have the Future trait, we can manually implement it. In doing so, the resulting code can be more readable compared to expanding with the Async and Await syntactic sugar. -Below is an example of manually generating a state machine. If we were to write it using the Async syntax, it might be as simple as an async function returning a 1. +Since we have the Future trait, we can manually implement it. In doing so, the resulting code can be more readable compared to expanding with the Async and Await syntactic sugar. +Below is an example of manually generating a state machine. If we were to write it using the Async syntax, it might be as simple as an async function returning a 1. However, when manually writing it, we need to define a custom struct and implement the Future trait for that struct. ```rust @@ -132,17 +139,17 @@ impl Future for DoHTTPFuture { } ``` -The essence of an async function is to return an anonymous structure that implements the Future trait. This type is automatically generated by the compiler, -so its name is not exposed to us. On the other hand, when manually implementing it, we define a struct called `DoHTTPFuture` and implement the Future trait for it. +The essence of an async function is to return an anonymous structure that implements the Future trait. This type is automatically generated by the compiler, +so its name is not exposed to us. On the other hand, when manually implementing it, we define a struct called `DoHTTPFuture` and implement the Future trait for it. Its output type, just like the return value of an async function, is an i32. These two approaches are equivalent. 
Since we only need to immediately return the number 1 in this case without any waiting involved, we can simply return `Ready(1)` in the poll implementation. -In the previous example of `sum`, it involved the composition of asynchronous logic: making two calls to `do_http` and then adding the two results together. +In the previous example of `sum`, it involved the composition of asynchronous logic: making two calls to `do_http` and then adding the two results together. If we were to manually implement this, it would be slightly more complex because it would involve two await points. Once await is involved, it essentially becomes a state machine. -Why a state machine? Because each await point may potentially block, and the thread cannot stop and wait there. It must switch to execute other tasks. -In order to resume the previous task later, its corresponding state must be stored. Here, we define two states: `FirstDoHTTP` and `SecondDoHTTP`. +Why a state machine? Because each await point may potentially block, and the thread cannot stop and wait there. It must switch to execute other tasks. +In order to resume the previous task later, its corresponding state must be stored. Here, we define two states: `FirstDoHTTP` and `SecondDoHTTP`. When implementing the poll function, we enter a loop where we match the current state and perform state transitions accordingly. ```rust @@ -161,7 +168,7 @@ enum SumFuture { impl Future for SumFuture { type Output = i32; - + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let this = self.get_mut(); loop { @@ -191,40 +198,41 @@ impl Future for SumFuture { ``` ### The Relationship between Task, Future and Runtime + Let's use the example of TcpStream's Read/Write to illustrate the entire mechanism and the relationship between components. First, when we create a TCP stream, the component internally registers it with a poller.
This poller can be thought of as a wrapper for epoll (the specific driver used depends on the platform). -Now, let's go through the steps in order. We have a task that needs to be spawned for execution. Essentially, spawning a task means putting it into the task queue of the runtime. +Now, let's go through the steps in order. We have a task that needs to be spawned for execution. Essentially, spawning a task means putting it into the task queue of the runtime. The runtime will continuously take tasks from the task queue and execute them. Execution involves advancing the state machine, which means invoking the poll method of the task. This brings us to the second step. ![image](/img/blog/Monoio_Open_Source/6_en.png) -We execute the poll method, which is essentially implemented by the user. Within this task, the user will invoke TcpStream's read/write functions. -Internally, these functions eventually make system calls to perform their functionality. However, before executing the syscall, certain conditions must be met, -such as the file descriptor (fd) being ready for reading or writing. If it doesn't meet these conditions, even if we execute the syscall, -we will only receive a WOULD_BLOCK error, resulting in wasted performance. Initially, we assume that newly added fds are both readable and writable, -so the first poll will execute the syscall. If there is no data to read or the kernel's write buffer is full, the syscall will return a WOULD_BLOCK error. +We execute the poll method, which is essentially implemented by the user. Within this task, the user will invoke TcpStream's read/write functions. +Internally, these functions eventually make system calls to perform their functionality. However, before executing the syscall, certain conditions must be met, +such as the file descriptor (fd) being ready for reading or writing. 
If it doesn't meet these conditions, even if we execute the syscall, +we will only receive a WOULD_BLOCK error, resulting in wasted performance. Initially, we assume that newly added fds are both readable and writable, +so the first poll will execute the syscall. If there is no data to read or the kernel's write buffer is full, the syscall will return a WOULD_BLOCK error. Upon detecting this error, we modify the readiness record and set the relevant read/write for that fd as not ready. At this point, we can only return Pending. -Next, we move to the fourth step. When all the tasks in our task queue have finished execution and all tasks are blocked on I/O, it means that none of the I/O operations are ready. +Next, we move to the fourth step. When all the tasks in our task queue have finished execution and all tasks are blocked on I/O, it means that none of the I/O operations are ready. The thread will continuously block in the poller's wait method, which can be thought of as something similar to epoll_wait. When using io_uring for implementation, this may correspond to another syscall. -At this point, entering a syscall is reasonable because there are no tasks to execute, and there is no need to continuously poll the I/O status. -Entering a syscall allows the CPU time slice to be yielded to other tasks on the same machine. If any I/O operation becomes ready, we will return from the syscall, +At this point, entering a syscall is reasonable because there are no tasks to execute, and there is no need to continuously poll the I/O status. +Entering a syscall allows the CPU time slice to be yielded to other tasks on the same machine. If any I/O operation becomes ready, we will return from the syscall, and the kernel will inform us which events on which fds have become ready. For example, if we are interested in the readability of a specific fd, it will inform us that the fd is ready for reading. 
-We need to mark the readiness of the fd as readable and wake up the tasks waiting on it. In the previous step, there was a task waiting here, dependent on the I/O being readable. -Now that the condition is met, we need to reschedule it. Waking up essentially means putting the task back into the task queue. Implementation-wise, this is achieved through the wake-related methods of a Waker. -The handling behavior of wake is implemented by the runtime, and the simplest implementation is to use a deque to store tasks, pushing them in when waking. +We need to mark the readiness of the fd as readable and wake up the tasks waiting on it. In the previous step, there was a task waiting here, dependent on the I/O being readable. +Now that the condition is met, we need to reschedule it. Waking up essentially means putting the task back into the task queue. Implementation-wise, this is achieved through the wake-related methods of a Waker. +The handling behavior of wake is implemented by the runtime, and the simplest implementation is to use a deque to store tasks, pushing them in when waking. More complex implementations may consider mechanisms such as task stealing and distribution for cross-thread scheduling. -When this task is polled again, it will perform TcpStream read internally. It will find that the I/O is in a readable state, so it will execute the read syscall, +When this task is polled again, it will perform TcpStream read internally. It will find that the I/O is in a readable state, so it will execute the read syscall, and at this point, the syscall will execute correctly, and TcpStream read will return Ready to the outside. ### Waker -Earlier, we mentioned the Waker. Now let's discuss how the Waker works. We know that a Future is essentially a state machine, and each time it is polled, it returns Pending or Ready. +Earlier, we mentioned the Waker. Now let's discuss how the Waker works. 
We know that a Future is essentially a state machine, and each time it is polled, it returns Pending or Ready. When it encounters an IO block and returns Pending, who detects the IO readiness? And how is the Future driven again once the IO is ready? ```rust @@ -241,8 +249,8 @@ pub struct Context<'a> { } ``` -Inside the Future trait, in addition to containing the mutable borrow of its own state machine, there is another important component called Context. -Currently, the Context only has one meaningful member, which is the Waker. For now, we can consider the Waker as a trait object constructed and implemented by the runtime. +Inside the Future trait, in addition to containing the mutable borrow of its own state machine, there is another important component called Context. +Currently, the Context only has one meaningful member, which is the Waker. For now, we can consider the Waker as a trait object constructed and implemented by the runtime. Its implementation effect is that when we wake this Waker, the task is added back to the task queue and may be executed immediately or later. Let's take another example to understand the whole process: @@ -250,6 +258,7 @@ Let's take another example to understand the whole process: ![image](/img/blog/Monoio_Open_Source/7.png) When a user calls `listener.accept()` to generate an `AcceptFut` and waits: + 1. The `fut.await` internally calls the `poll` method of the Future using the `cx` (Context). 2. Inside the `poll`, a syscall is executed. 3. There are no incoming connections, so the kernel returns `WOULD_BLOCK`. @@ -264,10 +273,11 @@ When a user calls `listener.accept()` to generate an `AcceptFut` and waits: 12. 12/13, Kernel returns the syscall result, and poll returns Ready. ### Runtime + 1. Let's start with the executor. It has an executor and a task queue. Its job is to continuously take tasks from the queue and drive their execution. When all tasks have completed and it must wait, it hands over control to the Reactor. -2. 
Once the Reactor receives control, it interacts with the kernel and waits for IO readiness. After IO is ready, we need to mark the readiness state of that IO and wake up the tasks associated with it. +2. Once the Reactor receives control, it interacts with the kernel and waits for IO readiness. After IO is ready, we need to mark the readiness state of that IO and wake up the tasks associated with it. After waking up, the control is handed back to the executor. When the executor executes the task, it will invoke the capabilities provided by the IO component. -3. The IO component needs to provide these asynchronous interfaces. For example, when a user wants to use TcpStream, they need to use a TcpStream provided by the runtime instead of the standard library directly. +3. The IO component needs to provide these asynchronous interfaces. For example, when a user wants to use TcpStream, they need to use a TcpStream provided by the runtime instead of the standard library directly. Secondly, the component should be able to register its file descriptor (fd) with the Reactor. Thirdly, when the IO is not ready, we can place the waker associated with that task in the relevant area. That's roughly how the asynchronous mechanism in Rust works. @@ -275,50 +285,53 @@ That's roughly how the asynchronous mechanism in Rust works. ![image](/img/blog/Monoio_Open_Source/8_en.png) ## Monoio Design -[Monoio][Monoio] is a thread-per-core Rust runtime with io_uring/epoll/kqueue. And it is designed to be the most efficient and performant thread-per-core Rust runtime with good platform compatibility. + +[Monoio][Monoio] is a thread-per-core Rust runtime with io_uring/epoll/kqueue. And it is designed to be the most efficient and performant thread-per-core Rust runtime with good platform compatibility. The following will describe the key points of the [Monoio][Monoio] Runtime design in four parts: + 1. Async IO interface based on GAT (Generic Associated Types). 2. 
Driver detection and switching transparent to the upper layer. 3. Balancing performance and functionality. 4. Providing compatibility with the Tokio interface. ### Pure async IO interface based on GAT + First, let's introduce two notification mechanisms. The first one is similar to epoll, which is a notification based on readiness states. The second one is the io-uring mode, which is a "completion notification" based mode. ![image](/img/blog/Monoio_Open_Source/9_en.png) -In the readiness-based mode, tasks wait and detect IO readiness through epoll, and only perform syscalls when the IO is ready. +In the readiness-based mode, tasks wait and detect IO readiness through epoll, and only perform syscalls when the IO is ready. However, in the completion notification-based mode, Monoio can be lazier: it simply tells the kernel what the current task wants to do and then lets go. -io_uring allows users and the kernel to share two lock-free queues: the submission queue (SQ) is written by user-space programs and consumed by the kernel, -while the completion queue (CQ) is written by the kernel and consumed by user-space. The enter syscall can be used to submit the SQEs (Submission Queue Entries) in the queue to the kernel, +io_uring allows users and the kernel to share two lock-free queues: the submission queue (SQ) is written by user-space programs and consumed by the kernel, +while the completion queue (CQ) is written by the kernel and consumed by user-space. The enter syscall can be used to submit the SQEs (Submission Queue Entries) in the queue to the kernel, and optionally, it can also enter and wait for CQEs (Completion Queue Entries). In syscall-intensive applications, using io_uring can significantly reduce the number of context switches, and io_uring itself can also reduce data copying in the kernel. ![image](/img/blog/Monoio_Open_Source/10_en.png) -The differences between these two modes will greatly influence the design of the Runtime and IO interface. 
In the first mode, there is no need to hold the buffer while waiting; -the buffer is only needed when executing the syscall. Therefore, in this mode, users can pass the `&mut Buffer` when calling the actual poll (e.g., poll_read). +The differences between these two modes will greatly influence the design of the Runtime and IO interface. In the first mode, there is no need to hold the buffer while waiting; +the buffer is only needed when executing the syscall. Therefore, in this mode, users can pass the `&mut Buffer` when calling the actual poll (e.g., poll_read). In the second mode, once submitted to the kernel, the kernel can access the buffer at any time, and [Monoio][Monoio] must ensure the validity of the buffer before the corresponding CQE for that task returns. -If existing async IO traits (such as Tokio/async-std, etc.) are used, passing a reference to the buffer during read/write may result in memory safety issues such as Use-After-Free (UAF). -For example, if the user pushes the buffer pointer into the uring SQ when calling read, but immediately drops the created Future (`read(&mut buffer)`) and drops the buffer, +If existing async IO traits (such as Tokio/async-std, etc.) are used, passing a reference to the buffer during read/write may result in memory safety issues such as Use-After-Free (UAF). +For example, if the user pushes the buffer pointer into the uring SQ when calling read, but immediately drops the created Future (`read(&mut buffer)`) and drops the buffer, this behavior does not violate Rust's borrowing rules, but the kernel will still access freed memory, potentially stomping on memory blocks allocated by the user program later on. -One solution in this case is to capture ownership of the buffer. When generating the Future, the ownership is given to the Runtime, so the user cannot access the buffer in any way, +One solution in this case is to capture ownership of the buffer. 
When generating the Future, the ownership is given to the Runtime, so the user cannot access the buffer in any way, thus ensuring the validity of the pointer before the kernel returns the CQE. This solution is inspired by the approach used in Tokio-uring. -[Monoio][Monoio] defines the `AsyncReadRent` trait. The term "Rent" refers to borrowing, where the Runtime takes the buffer from the user first and returns it later. -The type `ReadFuture` here has a lifetime generic, which is made possible by GAT (Generic Associated Types). GAT is now stable and can be used in the stable version. -When implementing the associated Future, the `TAIT` trait can be used directly with the async/await syntax, which is much more convenient and user-friendly compared to manually defining Futures. +[Monoio][Monoio] defines the `AsyncReadRent` trait. The term "Rent" refers to borrowing, where the Runtime takes the buffer from the user first and returns it later. +The type `ReadFuture` here has a lifetime generic, which is made possible by GAT (Generic Associated Types). GAT is now stable and can be used in the stable version. +When implementing the associated Future, the `TAIT` trait can be used directly with the async/await syntax, which is much more convenient and user-friendly compared to manually defining Futures. This feature is not yet stable (it is now called `impl trait in assoc type`). -However, transferring ownership introduces new issues. In the readiness-based mode, canceling IO only requires dropping the Future. Here, if the Future is dropped, -it may lead to data flow errors on the connection (as the Future might be dropped at the moment when the syscall has already succeeded). -Additionally, a more serious problem is that the buffer captured by the Future will definitely be lost. To address these two issues, Monoio supports IO traits with cancellation capabilities. 
-When canceled, a `CancelOp` is pushed, and the user needs to continue waiting for the original Future to complete (as it has been canceled, it is expected to return within a short time). +However, transferring ownership introduces new issues. In the readiness-based mode, canceling IO only requires dropping the Future. Here, if the Future is dropped, +it may lead to data flow errors on the connection (as the Future might be dropped at the moment when the syscall has already succeeded). +Additionally, a more serious problem is that the buffer captured by the Future will definitely be lost. To address these two issues, Monoio supports IO traits with cancellation capabilities. +When canceled, a `CancelOp` is pushed, and the user needs to continue waiting for the original Future to complete (as it has been canceled, it is expected to return within a short time). The corresponding syscall may succeed or fail and return the buffer. ### Automatic Driver Detection and Switching for Higher Level @@ -338,42 +351,46 @@ trait OpAble { 3. Implement internally using the OpAble unified component (abstracting Read, Write, and other operations). Specifically, for operations such as accept, connect, read, write, etc., which are implemented as OpAble, they correspond to the following three functions: + 1. uring_op: Generates the corresponding uring SQE. 2. legacy_interest: Returns the direction of read/write it is interested in. 3. legacy_call: Executes the syscall directly. ![image](/img/blog/Monoio_Open_Source/11_en.png) -The entire process will submit a structure that implements OpAble to the driver. It will then return something that implements the Future. +The entire process will submit a structure that implements OpAble to the driver. It will then return something that implements the Future. When polling or dropping, it will specifically dispatch to one of the two driver implementations, using one or two of the three functions mentioned. 
### Performance + Performance is the starting point and the major advantage of [Monoio][Monoio]. In addition to the improvements brought by io_uring, it is designed as a thread-per-core Runtime. + 1. All tasks run only on fixed threads, without task stealing. 2. The task queue is a thread-local structure, operated without locks or contention. High performance stems from two aspects: + 1. High performance within the Runtime: Essentially equivalent to direct syscall integration. 2. High performance in user code: Structures are designed to be thread-local and avoid crossing thread boundaries as much as possible. Comparison of task stealing and thread-per-core mechanisms: -In the case of using Tokio, there may be very few tasks on one thread, while another thread has a high workload. In this situation, the idle thread can steal tasks from the busy thread, +In the case of using Tokio, there may be very few tasks on one thread, while another thread has a high workload. In this situation, the idle thread can steal tasks from the busy thread, which is similar to how it works in Go. This mechanism allows for better utilization of the CPU and can achieve good performance in general scenarios. -However, cross-thread operations themselves incur overhead, and when multiple threads operate on data structures, locks or lock-free structures are required. -Lock-free does not mean there is no additional overhead. Compared to purely thread-local operations, cross-thread lock-free structures can impact cache performance, +However, cross-thread operations themselves incur overhead, and when multiple threads operate on data structures, locks or lock-free structures are required. +Lock-free does not mean there is no additional overhead. Compared to purely thread-local operations, cross-thread lock-free structures can impact cache performance, and CAS (Compare and Swap) operations may involve some unnecessary loops. Moreover, this threading model also affects user code. 
-For example, suppose we need an SDK internally to collect some metrics from this program and aggregate them before reporting. -Achieving optimal performance can be challenging with a Tokio-based implementation. However, with a thread-per-core Runtime structure, -we can place the aggregated map in thread-local storage without any locks or contention issues. Each thread can start a task that periodically clears and reports the data in the thread-local storage. -In scenarios where tasks may cross thread boundaries, we would need to use a global structure to aggregate the metrics and have a global task for reporting data. +For example, suppose we need an SDK internally to collect some metrics from this program and aggregate them before reporting. +Achieving optimal performance can be challenging with a Tokio-based implementation. However, with a thread-per-core Runtime structure, +we can place the aggregated map in thread-local storage without any locks or contention issues. Each thread can start a task that periodically clears and reports the data in the thread-local storage. +In scenarios where tasks may cross thread boundaries, we would need to use a global structure to aggregate the metrics and have a global task for reporting data. It becomes challenging to use lock-free data structures for aggregation in such scenarios. -Therefore, both threading models have their advantages. The thread-per-core model achieves better performance for tasks that can be processed independently. -Sharing fewer resources leads to better performance. However, the drawback of the thread-per-core model is that it cannot fully utilize the CPU when the workload is unevenly distributed among tasks. -For specific scenarios such as gateway proxies, the thread-per-core model is more likely to fully utilize hardware performance and achieve good horizontal scalability. +Therefore, both threading models have their advantages. 
The thread-per-core model achieves better performance for tasks that can be processed independently. +Sharing fewer resources leads to better performance. However, the drawback of the thread-per-core model is that it cannot fully utilize the CPU when the workload is unevenly distributed among tasks. +For specific scenarios such as gateway proxies, the thread-per-core model is more likely to fully utilize hardware performance and achieve good horizontal scalability. Popular solutions like Nginx and Envoy employ this threading model. ![image](/img/blog/Monoio_Open_Source/12_en.png) @@ -381,22 +398,24 @@ Popular solutions like Nginx and Envoy employ this threading model. We have conducted some benchmarks, and [Monoio][Monoio] demonstrates excellent performance scalability. As the number of CPU cores increases, you only need to add the corresponding threads. ### Functionality + Thread-per-core does not imply the absence of cross-thread capabilities. Users can still use some shared structures across threads, which are unrelated to the Runtime. The Runtime provides the ability to wait across threads. - -Tasks are executed within the local thread but can wait for tasks on other threads. This is an essential capability. For example, if a user needs to fetch remote configurations with a single thread and distribute them to all threads, + +Tasks are executed within the local thread but can wait for tasks on other threads. This is an essential capability. For example, if a user needs to fetch remote configurations with a single thread and distribute them to all threads, they can easily implement this functionality based on this capability. ![image](/img/blog/Monoio_Open_Source/13.png) - -The essence of cross-thread waiting is for a task on another thread to wake up the local thread. In the implementation, we mark the ownership of tasks in the Waker. 
-If the current thread is not the task's owner thread, the Runtime will send the task to its owner thread using a lock-free queue. -If the target thread is in a sleep state (e.g., waiting for IO in a syscall), the Runtime will wake it up using a pre-installed eventfd. + +The essence of cross-thread waiting is for a task on another thread to wake up the local thread. In the implementation, we mark the ownership of tasks in the Waker. +If the current thread is not the task's owner thread, the Runtime will send the task to its owner thread using a lock-free queue. +If the target thread is in a sleep state (e.g., waiting for IO in a syscall), the Runtime will wake it up using a pre-installed eventfd. After being awakened, the target thread will process the cross-thread waker queue. - + In addition to providing the ability for cross-thread waiting, [Monoio][Monoio] also offers the spawn_blocking capability for users to execute heavy computational logic without affecting other tasks in the same thread. - + ### Compatibility Interfaces -Since many components (such as Hyper) are bound to Tokio's IO traits, and as mentioned earlier, it is not possible to unify these two IO traits due to the underlying drivers. + +Since many components (such as Hyper) are bound to Tokio's IO traits, and as mentioned earlier, it is not possible to unify these two IO traits due to the underlying drivers. This can create difficulties in terms of ecosystem compatibility. For some non-hot-path components, it is necessary to allow users to use them in a compatible manner, even if it incurs some performance cost. 
![image](/img/blog/Monoio_Open_Source/14.png) @@ -409,43 +428,43 @@ let tcp = monoio_compat::StreamWrapper::new(monoio_tcp); let monoio_tcp = monoio::net::TcpStream::connect("1.1.1.1:80").await.unwrap(); // both of them implement tokio::io::AsyncRead and tokio::io::AsyncWrite ``` - -We provide a wrapper that includes a buffer, and when users use it, they need to incur an additional memory copy overhead. + +We provide a wrapper that includes a buffer, and when users use it, they need to incur an additional memory copy overhead. Through this approach, we can wrap components of Monoio into Tokio-compatible interfaces, allowing them to be used with compatible components. ## Runtime Comparison & Applications + This section discusses some runtime comparison options and their applications. - -We have already mentioned the comparison between uniform scheduling and thread-per-core. Now let's focus on their application scenarios. + +We have already mentioned the comparison between uniform scheduling and thread-per-core. Now let's focus on their application scenarios. For a large number of lightweight tasks, the thread-per-core mode is suitable. This is particularly applicable to applications such as proxies, gateways, and file IO-intensive tasks, making Monoio an excellent choice. - -Additionally, while Tokio aims to be a general cross-platform solution, [Monoio][Monoio] was designed from the beginning with a focus on achieving optimal performance, primarily using io_uring. + +Additionally, while Tokio aims to be a general cross-platform solution, [Monoio][Monoio] was designed from the beginning with a focus on achieving optimal performance, primarily using io_uring. Although it can also support epoll and kqueue, they serve as fallback options. For example, kqueue is primarily included to facilitate development on macOS, but it is not intended for actual production use (support for Windows is planned in the future).
- -In terms of ecosystem, Tokio has a comprehensive ecosystem, while [Monoio][Monoio] lags behind in this aspect. Even with the compatibility layer, there are inherent costs. -Tokio's task stealing can perform well in many scenarios, but it has limited scalability. On the other hand, Monoio demonstrates good scalability, but it has certain limitations in terms of business scenarios and programming models. -Therefore, Monoio is well-suited for scenarios such as proxies, gateways, and data aggregation in caches. It is also suitable for components that perform file IO, as io_uring excels in handling file IO. + +In terms of ecosystem, Tokio has a comprehensive ecosystem, while [Monoio][Monoio] lags behind in this aspect. Even with the compatibility layer, there are inherent costs. +Tokio's task stealing can perform well in many scenarios, but it has limited scalability. On the other hand, Monoio demonstrates good scalability, but it has certain limitations in terms of business scenarios and programming models. +Therefore, Monoio is well-suited for scenarios such as proxies, gateways, and data aggregation in caches. It is also suitable for components that perform file IO, as io_uring excels in handling file IO. Without io_uring, there is no true asynchronous file IO available in Linux; only with io_uring can this be achieved. Monoio is also suitable for components that heavily rely on file IO, such as database-related components. 
-| | Tokio | Monoio | -|---|---|--| -| Scene | General, task stealing | Specific, thread-per-core | -| Platform | Cross-platform, epoll/kqueue/iocp | Specific, io_uring, epoll/kqueue as fallback | -| Ecosystem | comprehensive | Relatively lacking | -| Horizontal extensibility | Not Good Enough | Good | -| Application | General business | Proxy, Gateway, cache, data aggregate, etc | +| | Tokio | Monoio | +| ------------------------ | --------------------------------- | -------------------------------------------- | +| Scene | General, task stealing | Specific, thread-per-core | +| Platform | Cross-platform, epoll/kqueue/iocp | Specific, io_uring, epoll/kqueue as fallback | +| Ecosystem | comprehensive | Relatively lacking | +| Horizontal extensibility | Not Good Enough | Good | +| Application | General business | Proxy, Gateway, cache, data aggregate, etc | -Tokio-uring is actually a layer built on top of Tokio, somewhat like a distribution layer. Its design is elegant, and we have also referenced many of its designs, such as the ownership transfer mechanism. -However, it is still based on Tokio and runs uring on top of epoll, without achieving full transparency for users. When implementing components, one can only choose between using epoll or using uring. +Tokio-uring is actually a layer built on top of Tokio, somewhat like a distribution layer. Its design is elegant, and we have also referenced many of its designs, such as the ownership transfer mechanism. +However, it is still based on Tokio and runs uring on top of epoll, without achieving full transparency for users. When implementing components, one can only choose between using epoll or using uring. If uring is chosen, the resulting binary cannot run on older versions of Linux. On the other hand, Monoio addresses this issue well and supports dynamic detection of uring availability. ### Applications of Monoio + 1. Monoio Gateway: A gateway service based on the [Monoio][Monoio] ecosystem. 
In optimized version benchmarks, its performance surpasses that of Nginx. 2. [Volo][Volo]: An RPC framework open-sourced by the [CloudWeGo][CloudWeGo] team, currently being integrated. The PoC version demonstrates a 26% performance improvement compared to the Tokio-based version. We have also conducted some internal business trials, and in the future, we will focus on improving compatibility and component development to make Monoio even more user-friendly. - [CloudWeGo]: https://github.com/cloudwego [Monoio]: https://github.com/bytedance/monoio [Volo]: https://github.com/cloudwego/volo - diff --git a/content/en/blog/news/Shmipc_Open_Source/index.md b/content/en/blog/news/Shmipc_Open_Source/index.md index 99ac0cb25c..7dcb178e00 100644 --- a/content/en/blog/news/Shmipc_Open_Source/index.md +++ b/content/en/blog/news/Shmipc_Open_Source/index.md @@ -1,107 +1,122 @@ --- date: 2023-04-04 title: "Introducing Shmipc: A High Performance Inter-process Communication Library" +projects: ["Shmipc"] linkTitle: "Introducing Shmipc: A High Performance Inter-process Communication Library" keywords: ["CloudWeGo", "zero copy", "shared memory", "IPC"] description: "This blog introduces the background, design ideas and performance of CloudWeGo-Shmipc project, as well as the lessons learned from ByteDance and project roadmap。" author: CloudWeGo Team --- -We are excited to introduce an open source project - **Shmipc**, a **high performance inter-process communication library** developed by ByteDance. -It is built on Linux's **shared memory technology** and uses unix or tcp connection to do process synchronization and finally implements +We are excited to introduce an open source project - **Shmipc**, a **high performance inter-process communication library** developed by ByteDance. +It is built on Linux's **shared memory technology** and uses unix or tcp connection to do process synchronization and finally implements zero copy communication across inter-processes. 
In IO-intensive or large-package scenarios, it has better performance. -There isn't much information on this area, so the open-source of Shmipc would like to contribute by providing a valuable reference. +There isn't much information on this area, so the open-source of Shmipc would like to contribute by providing a valuable reference. In this blog, we would like to cover some of the main **design ideas** of Shmipc, the **problems** encountered during the adoption process and the subsequent **evolution plan**. + - Design: https://github.com/cloudwego/shmipc-spec - Implementation in Golang: https://github.com/cloudwego/shmipc-go ## Background and Motivation -At ByteDance, Service Mesh has undergone a lot of performance optimization during its adoption and evolution. The **traffic interception** of Service Mesh is -achieved by **inter-process communication** between the mesh proxy and the microservice framework's **agreed-upon addresses**, which performs better than iptables solutions. + +At ByteDance, Service Mesh has undergone a lot of performance optimization during its adoption and evolution. The **traffic interception** of Service Mesh is +achieved by **inter-process communication** between the mesh proxy and the microservice framework's **agreed-upon addresses**, which performs better than iptables solutions. However, conventional optimization methods no longer bring significant performance improvements. Therefore, we shifted our focus to inter-process communication, and **Shmipc** was born. ## Design Ideas + ### Zero Copy -The two most widely used inter-process communication methods in production environments are unix domain sockets and TCP loopback (localhost:$PORT), -and their performance differences are not significant from the benchmark. From a technical standpoint, both require copying communication data between user space and kernel space. 
+ +The two most widely used inter-process communication methods in production environments are unix domain sockets and TCP loopback (localhost:$PORT), +and their performance differences are not significant from the benchmark. From a technical standpoint, both require copying communication data between user space and kernel space. In the RPC scenario, there are four memory copies in inter-process communication during a single RPC process, with two copies in the request path and two copies in the response path. ![image](/img/blog/Shmipc_Open_Source/zero_copy.png) -Although sequential copying on modern CPUs is very fast, eliminating up to four memory copies can still save CPU usage in large packet scenarios. +Although sequential copying on modern CPUs is very fast, eliminating up to four memory copies can still save CPU usage in large packet scenarios. With the zero-copy feature based on shared memory communication, we can easily achieve this. However, to achieve zero-copy, there will be many additional tasks surrounding shared memory itself, such as: -1. In-depth serialization and deserialization of microservice frameworks. We hope that when a Request or Response is serialized, the corresponding binary data is already in shared memory, + +1. In-depth serialization and deserialization of microservice frameworks. We hope that when a Request or Response is serialized, the corresponding binary data is already in shared memory, rather than being serialized to a non-shared memory buffer and then copied to a shared memory buffer. 2. Implementing a process synchronization mechanism. When one process writes data to shared memory, another process does not know about it, so a synchronization mechanism is needed for notification. 3. Efficient memory allocation and recycling. Ensuring that the allocation and recycling mechanism of shared memory across processes has a low overhead to avoid masking the benefits of zero-copy features. 
### Synchronization Mechanism + Consider different scenarios: -1. On-demand real-time synchronization. Suitable for online scenarios that are extremely sensitive to latency. Notify the other process after each write operation is completed. - There are many options to choose from on Linux, such as TCP loopback, unix domain sockets, event fd, etc. Event fd has slightly better benchmark performance, - but passing fd across processes introduces too much complexity. The performance improvement it brings is not very significant in IPC, and the trade-off between + +1. On-demand real-time synchronization. Suitable for online scenarios that are extremely sensitive to latency. Notify the other process after each write operation is completed. + There are many options to choose from on Linux, such as TCP loopback, unix domain sockets, event fd, etc. Event fd has slightly better benchmark performance, + but passing fd across processes introduces too much complexity. The performance improvement it brings is not very significant in IPC, and the trade-off between complexity and performance needs to be carefully considered. At ByteDance, we chose unix domain sockets for process synchronization. -2. Periodic synchronization. Suitable for offline scenarios that are not sensitive to latency. Access the custom flag in shared memory through high-interval sleep +2. Periodic synchronization. Suitable for offline scenarios that are not sensitive to latency. Access the custom flag in shared memory through high-interval sleep to determine whether there is data written. However, note that sleep itself also requires a system call and has greater overhead than reading and writing with unix domain sockets. 3. Polling synchronization. Suitable for scenarios where latency is very sensitive but the CPU is not as sensitive. You can complete it by polling the custom flag in shared memory on a single core. 
-Overall, on-demand real-time synchronization and periodic synchronization require system calls to complete, +Overall, on-demand real-time synchronization and periodic synchronization require system calls to complete, while polling synchronization does not require system calls but requires running a CPU core at full capacity under normal circumstances. ### Batching IO Operations -In online scenarios, real-time synchronization is required on demand for each data write, which requires a process synchronization operation (Step 4 in the figure below). -Although the latency issue is resolved, to demonstrate the benefits of zero-copy on performance, the number of packets that require interaction needs to be greater than a relatively large threshold. + +In online scenarios, real-time synchronization is required on demand for each data write, which requires a process synchronization operation (Step 4 in the figure below). +Although the latency issue is resolved, to demonstrate the benefits of zero-copy on performance, the number of packets that require interaction needs to be greater than a relatively large threshold. Therefore, an IO queue was constructed in shared memory to complete batch IO operation, enabling benefits to be demonstrated even in small packet IO-intensive scenarios. -The core idea is that when a process writes a request to the IO queue, it notifies the other process to handle it. -When the next request comes in(corresponding to IO Event 2~N in the figure, an IO Event can independently describe the position of a request in shared memory), +The core idea is that when a process writes a request to the IO queue, it notifies the other process to handle it. +When the next request comes in (corresponding to IO Event 2~N in the figure, an IO Event can independently describe the position of a request in shared memory), if the other process is still processing requests in the IO queue, there is no need to send a notification. 
Therefore, the more dense the IO, the better the batching effect. ![image](/img/blog/Shmipc_Open_Source/share_memory.jpeg) -In addition, in offline scenarios, scheduled synchronization itself is a form of batch processing for IO, and the effect of batch processing can +In addition, in offline scenarios, scheduled synchronization itself is a form of batch processing for IO, and the effect of batch processing can effectively reduce the system calls caused by process synchronization. The longer the sleep interval, the lower the overhead of process synchronization. -As for polling synchronization, there is no need to consider batch IO operation because this mechanism itself is designed to reduce process synchronization overhead. +As for polling synchronization, there is no need to consider batch IO operation because this mechanism itself is designed to reduce process synchronization overhead. Polling synchronization directly occupies a CPU core, which is equivalent to defaulting to maximizing the synchronization mechanism overhead to achieve extremely low synchronization latency. ## Performance + ### Benchmark + ![image](/img/blog/Shmipc_Open_Source/benchmark.png) -The X-axis represents the size of the data packet, and the Y-axis represents the time required for one Ping-Pong in microseconds, with smaller values being better. +The X-axis represents the size of the data packet, and the Y-axis represents the time required for one Ping-Pong in microseconds, with smaller values being better. It can be seen that in small packet scenarios, Shmipc can also achieve some benefits compared to unix domain sockets, and **performance improves as packet size increases**. 
**Source**: `git clone https://github.com/cloudwego/shmipc-go && go test -bench=BenchmarkParallelPingPong -run BenchmarkParallelPingPong` ### Production Environment -In the Service Mesh ecosystem of ByteDance's production environment, we have applied Shmipc in **over 3,000 services and more than 1 million instances**. -Different business scenarios show different benefits, with the **highest** benefit being a **24% reduction** in overall resource usage for the risk control business. + +In the Service Mesh ecosystem of ByteDance's production environment, we have applied Shmipc in **over 3,000 services and more than 1 million instances**. +Different business scenarios show different benefits, with the **highest** benefit being a **24% reduction** in overall resource usage for the risk control business. Of course, there are also scenarios with no obvious benefits or even deterioration. However, significant benefits can be seen in **both large packet and IO-intensive scenarios**. ## Lessons Learned + During the adoption process at ByteDance, we also encountered some pitfalls that caused some online accidents, which are quite valuable for reference. -1. **Shared memory leak**. The shared memory allocation and recovery in the IPC process involve two processes and can easily lead to shared memory leaks if not careful. + +1. **Shared memory leak**. The shared memory allocation and recovery in the IPC process involve two processes and can easily lead to shared memory leaks if not careful. Although the problem is very tricky, as long as it can be discovered actively when a leak occurs and there are observation methods to troubleshoot the leak afterwards, it can be solved. 1. Active discovery. By increasing some statistics and summarizing them in the monitoring system, active discovery can be achieved, such as the total memory size allocated and recovered. - 2. Observation methods. 
When designing the layout of shared memory, adding some metadata enables us to analyze shared memory dumped by the built-in debug tool at the time of the leak, + 2. Observation methods. When designing the layout of shared memory, adding some metadata enables us to analyze shared memory dumped by the built-in debug tool at the time of the leak, providing information on how much memory is leaked, what is in it, and some metadata related to this content. -2. **Packet congestion**. Packet congestion is the most troublesome problem, which can cause serious consequences due to various reasons. We once had a packet congestion accident in a certain business, - which was caused by the depletion of shared memory due to large packets. During the fallback to the normal path, there was a design defect, which caused a small probability of packet congestion. +2. **Packet congestion**. Packet congestion is the most troublesome problem, which can cause serious consequences due to various reasons. We once had a packet congestion accident in a certain business, + which was caused by the depletion of shared memory due to large packets. During the fallback to the normal path, there was a design defect, which caused a small probability of packet congestion. A valuable reference is to **increase integration testing and unit testing** in more scenarios to kill packet congestion in the cradle instead of explaining the investigation process and root cause. -3. **Shared memory trampling**. '**memfd**' should be used as much as possible to share memory, rather than the path of 'mmap' file system. - In the early days, the 'mmap' file system path was used for shared memory. The startup process of Shmipc and the path of shared memory were specified by environment variables, - and the boot process was injected into the application process. 
There is a situation where the application process may fork a process, which inherits the environment variables of the application process - and also integrates Shmipc. The forked process and the application process mmaped the same shared memory, resulting in trampling. - In ByteDance's accident scenario, the application process used the golang plugin mechanism to load `.so` from the outside to run, and the `.so` integrated with Shmipc and ran in the application process. +3. **Shared memory trampling**. '**memfd**' should be used as much as possible to share memory, rather than the path of 'mmap' file system. + In the early days, the 'mmap' file system path was used for shared memory. The startup process of Shmipc and the path of shared memory were specified by environment variables, + and the boot process was injected into the application process. There is a situation where the application process may fork a process, which inherits the environment variables of the application process + and also integrates Shmipc. The forked process and the application process mmaped the same shared memory, resulting in trampling. + In ByteDance's accident scenario, the application process used the golang plugin mechanism to load `.so` from the outside to run, and the `.so` integrated with Shmipc and ran in the application process. It could see all the environment variables, so it and the application process mmaped the same shared memory, resulting in undefined behavior during the operation. -4. **Sigbus coredump**. In the early days, shared memory is achieved through mmaping files under the `/dev/shm/` path (tmpfs), and most application services were running in docker container instances. - Container instances have capacity limits on tmpfs (which can be observed through df -h). This may cause a Sigbus error when the shared memory of mmap exceeds this limit. 
- There will be no error reported by mmap itself, but the application process will crash when it accesses memory beyond the limit during runtime. +4. **Sigbus coredump**. In the early days, shared memory was achieved through mmaping files under the `/dev/shm/` path (tmpfs), and most application services were running in docker container instances. + Container instances have capacity limits on tmpfs (which can be observed through df -h). This may cause a Sigbus error when the shared memory of mmap exceeds this limit. + There will be no error reported by mmap itself, but the application process will crash when it accesses memory beyond the limit during runtime. To solve this problem, use '**memfd**' to share memory, as in the third point. ## RoadMap + 1. Integrate with the Golang RPC framework [CloudWeGo/Kitex](https://github.com/cloudwego/kitex)。 2. Integrate with the Golang HTTP framework [CloudWeGo/Hertz](https://github.com/cloudwego/hertz)。 3. Open-source Rust version of Shmipc and integrate with the Rust RPC framework [CloudWeGo/Volo](https://github.com/cloudwego/volo)。 @@ -109,9 +124,10 @@ During the adoption process at ByteDance, we also encountered some pitfalls that 5. Introduce a timed synchronization mechanism for offline scenarios. 6. Introduce a polling synchronization mechanism for scenarios with extreme latency requirements. 7. Empower other IPC scenarios, such as IPC between Log SDK and Log Agent, IPC between Metrics SDK and Metrics Agent, etc. - + ## Conclusion -We hope that this article can provide a basic understanding of Shmipc and its design principles. More implementation details and usage methods -can be found in the projects of [shmipc-spec](https://github.com/cloudwego/shmipc-spec) and [shmipc-go](https://github.com/cloudwego/shmipc-go). -Issues and PRs are always welcomed to the Shmipc project as well as the [CloudWeGo](https://github.com/cloudwego) community. 
+ +We hope that this article can provide a basic understanding of Shmipc and its design principles. More implementation details and usage methods +can be found in the projects of [shmipc-spec](https://github.com/cloudwego/shmipc-spec) and [shmipc-go](https://github.com/cloudwego/shmipc-go). +Issues and PRs are always welcomed to the Shmipc project as well as the [CloudWeGo](https://github.com/cloudwego) community. We also hope that Shmipc can help more developers and enterprises build high-performance cloud-native architectures in the IPC field. diff --git a/content/en/blog/news/_index.md b/content/en/blog/news/_index.md index e610acb0bb..ae5042fe4a 100644 --- a/content/en/blog/news/_index.md +++ b/content/en/blog/news/_index.md @@ -1,6 +1,7 @@ --- title: "News" linkTitle: "News" +projects: [] weight: 1 --- diff --git a/content/en/blog/news/article_to_learn_about_CloudWeGo/index.md b/content/en/blog/news/article_to_learn_about_CloudWeGo/index.md index fb3c7bc9b0..9684e782fc 100644 --- a/content/en/blog/news/article_to_learn_about_CloudWeGo/index.md +++ b/content/en/blog/news/article_to_learn_about_CloudWeGo/index.md @@ -1,6 +1,7 @@ --- date: 2022-03-25 title: "An Article to Learn About ByteDance Microservices Middleware CloudWeGo" +projects: ["CloudWeGo"] linkTitle: "An Article to Learn About ByteDance Microservices Middleware CloudWeGo" keywords: ["CloudWeGo", "Microservices", "ByteDance", "Open Source"] description: "This blog introduces the open-source background, advantages, limitations and goals of CloudWeGo from an external perspective through interviews." 
diff --git a/content/en/blog/news/bytedance_gonet_practice/index.md b/content/en/blog/news/bytedance_gonet_practice/index.md index d5793a2b57..518415af96 100644 --- a/content/en/blog/news/bytedance_gonet_practice/index.md +++ b/content/en/blog/news/bytedance_gonet_practice/index.md @@ -1,6 +1,7 @@ --- date: 2020-05-24 title: "ByteDance Practices on Go Network Library" +projects: ["Netpoll"] linkTitle: "ByteDance Practices on Go Network Library" keywords: ["Netpoll", "Go", "epoll", "Network Library", "Multiplexing", "ZeroCopy"] description: "This blog introduces the design and practice of Bytedance self-developed network library Netpoll as well as the actual problems and solutions, hope to provide you with some reference." @@ -23,17 +24,18 @@ Since there are many articles discussing the principles of "epoll", this article ### Reactor - Event Monitoring and the Core of Scheduling -The core of "Netpoll" is the event monitoring scheduler -- "Reactor", which uses "epoll" to monitor the "File Descriptor (fd)" of the connection and triggers the read, write and close events on the connection through the callback mechanism.
+The core of "Netpoll" is the event monitoring scheduler -- "Reactor", which uses "epoll" to monitor the "File Descriptor (fd)" of the connection and triggers the read, write and close events on the connection through the callback mechanism.
![image](/img/blog/bytedance_gonet_practice_img/reactor.png) ### Server - MainReactor & SubReactor Implementation Netpoll combines Reactors in a 1: N master-slave pattern: + 1. "MainReactor" mainly manages the "Listener", and is responsible for monitoring ports and establishing new connections. 2. The "SubReactor" manages the "Connection", listens all assigned connections, and submits all triggered events to the goroutine pool for processing. 3. "Netpoll" supports "NoCopy RPC" by introducing active memory management in I/O tasks and providing an "NoCopy" invocation interface to the upper layer. 4. Add a goroutine pool to centrally process I/O tasks, reduce the number of goroutines and scheduling overhead. -
+
![image](/img/blog/bytedance_gonet_practice_img/server_reactor.png) @@ -48,6 +50,7 @@ SubReactor is shared between the client and server. Netpoll implements "Dialer" ### Why Nocopy Buffer? As mentioned earlier, the way epoll is triggered affects the design of I/O and buffer, which can be generally divided into two approaches: + - **Level Trigger (LT)**. It is necessary to complete I/O actively after the event is triggered, and provides buffers directly to the upper code. - **Edge Trigger (ET)**. You can choose to manage the event notification only (e.g. go net), with the upper layer code for I/O completion and buffers management. @@ -58,20 +61,22 @@ However, using LT also brings another problem, namely the additional concurrency On the other hand, common buffer libraries such as "bytes", "bufio", and "ringbuffer" have problems such as "growth" requiring copy of data from the original array; capacity can only be expanded but can't be reduced; occupying a large amount of memory etc. Therefore, we hope to introduce a new form of buffer to solve the two problems above. ### The Design and Advantages of Nocopy Buffer + Nocopy Buffer is implemented based on linked-list of array. As shown in the figure below, we abstract []byte array into blocks and combine blocks into Nocopy Buffer in the form of a linked list. Meanwhile, reference counting mechanism, Nocopy API and object pool are introduced.
-![image](/img/blog/bytedance_gonet_practice_img/buffer.png)
+![image](/img/blog/bytedance_gonet_practice_img/buffer.png)
Nocopy Buffer has the following advantages over some common buffer libraries like "bytes", "bufio", and "ringbuffer": + 1. Read and write in parallel without lock, and supports stream read/write with nocopy - - Read and write operate the head pointer and tail pointer separately without interfering with each other. + - Read and write operate the head pointer and tail pointer separately without interfering with each other. 2. Efficient capacity expansion and reduction - - For capacity expansion, you can add new blocks directly after the tail pointer without copying the original array. - - For capacity reduction, the head pointer directly releases the used block node to complete the capacity reduction. Each block has an independent reference count, and when the freed block is no longer referenced, the block node is actively reclaimed. + - For capacity expansion, you can add new blocks directly after the tail pointer without copying the original array. + - For capacity reduction, the head pointer directly releases the used block node to complete the capacity reduction. Each block has an independent reference count, and when the freed block is no longer referenced, the block node is actively reclaimed. 3. Flexible slicing and splicing of buffer (the characteristic of linked list) - - Support arbitrary read slicing (nocopy), and the upper layer code can process data stream slicing in parallel with nocopy by reference counting GC, regardless of the lifecycle. - - Support arbitrary splicing (nocopy). Buffer write supports splicing block after the tail pointer, without copy, and ensuring that data is written only once. + - Support arbitrary read slicing (nocopy), and the upper layer code can process data stream slicing in parallel with nocopy by reference counting GC, regardless of the lifecycle. + - Support arbitrary splicing (nocopy). Buffer write supports splicing block after the tail pointer, without copy, and ensuring that data is written only once. 4. 
Nocopy Buffer is pooled to reduce GC - - Treat each []byte array as a block node, and build an object pool to maintain free blocks, thus reuse blocks, reduce memory footprint and GC. Based on the Nocopy Buffer, we implemented Nocopy Thrift, so that the codec process allocates zero memory with zero copy. + - Treat each []byte array as a block node, and build an object pool to maintain free blocks, thus reuse blocks, reduce memory footprint and GC. Based on the Nocopy Buffer, we implemented Nocopy Thrift, so that the codec process allocates zero memory with zero copy. ## Connection Multiplexing @@ -81,34 +86,35 @@ There are some existing open-source connection multiplexing solutions. But they The design of Netpoll-based connection multiplexing is shown in the figure below. We abstract the Nocopy Buffer(and its sharding) into virtual connections, so that the upper layer code retains the same calling experience as "net.Conn". At the same time, the data on the real connection can be flexibly allocated to the virtual connection through protocol subcontracting in the underlying code. Or send virtual connection data through protocol encoding.
-![image](/img/blog/bytedance_gonet_practice_img/client_server.png)
+![image](/img/blog/bytedance_gonet_practice_img/client_server.png)
The connection multiplexing scheme contains the following core elements: + 1. The virtual connection - - It is essentially a "Nocopy Buffer", designed to replace real connections and avoid memory copy. - - The upper-layer service logic/codec is executed on the virtual connection, and the upper-layer logic can be executed in parallel asynchronously and independently. + - It is essentially a "Nocopy Buffer", designed to replace real connections and avoid memory copy. + - The upper-layer service logic/codec is executed on the virtual connection, and the upper-layer logic can be executed in parallel asynchronously and independently. 2. Shared map - - Shared locking is introduced to reduce the lock intensity. - - The Sequence ID is used to mark the request on the caller side and the shared lock is used to store the callback corresponding to the ID. - - After receiving the response data, find the corresponding callback based on the sequence ID and execute it. + - Shared locking is introduced to reduce the lock intensity. + - The Sequence ID is used to mark the request on the caller side and the shared lock is used to store the callback corresponding to the ID. + - After receiving the response data, find the corresponding callback based on the sequence ID and execute it. 3. Data subcontracting and encoding - - How to identify the complete request-response data package is the key to make the connection multiplexing scheme feasible, so the protocol needs to be introduced. - - The "Thrift Header Protocol" is used to check the data package integrity through the message header, and sequence ids are used to mark the corresponding relations between request and response. + - How to identify the complete request-response data package is the key to make the connection multiplexing scheme feasible, so the protocol needs to be introduced. 
+ - The "Thrift Header Protocol" is used to check the data package integrity through the message header, and sequence ids are used to mark the corresponding relations between request and response. ## ZeroCopy -"ZeroCopy" refers to the ZeroCopy function provided by Linux. In the previous chapter, we discussed nocopy of the service layer. But as we know, when we call the "sendmsg" system-call to send a data package, actually there is still a copy of the data, and the overhead of such copies is considerable when the data packages are large. For example, when the data package has the size of 100M, we can see the following result:
-![image](/img/blog/bytedance_gonet_practice_img/perf.png)
+"ZeroCopy" refers to the ZeroCopy function provided by Linux. In the previous chapter, we discussed nocopy of the service layer. But as we know, when we call the "sendmsg" system-call to send a data package, actually there is still a copy of the data, and the overhead of such copies is considerable when the data packages are large. For example, when the data package has the size of 100M, we can see the following result:
+![image](/img/blog/bytedance_gonet_practice_img/perf.png)
The previous example is merely the overhead of tcp package sending. In our scenario, most services are connected to the "Service Mesh". Therefore, there are three copies in a package sending: Service process to kernel, kernel to sidecar, sidecar to kernel. This makes the CPU usage caused by copying especially heavy for services demanding large package transactions, as shown in the following figure:
-![image](/img/blog/bytedance_gonet_practice_img/service_mesh_copy.png)
+![image](/img/blog/bytedance_gonet_practice_img/service_mesh_copy.png)
To solve this problem, we chose to use the ZeroCopy API provided by Linux (send is supported after 4.14; receive is supported after 5.4). But this introduces an additional engineering problem: the ZeroCopy send API is incompatible with the original call method and does not coexist well. Here's how ZeroCopy Send works: After the service process calls "sendmsg", "sendmsg" records the address of the "iovec" and returns it immediately. In this case, the service process cannot release the memory, and needs to wait for the kernel to send a signal indicating that an "iovec" has been successfully sent before it can be released via "epoll". Since we don't want to change the way the business side uses it, we need to provide a synchronous sending and receiving interface to the upper layer, so it is difficult to provide both ZeroCopy and non-Zerocopy abstraction based on the existing API. Since ZeroCopy has performance degradation in small package scenarios, this is not the default option. Thus, the ByteDance Service Framework Team collaborated with the ByteDance Kernel Team. The Kernel Team provided the synchronous interface: when "sendmsg" is called, the kernel listens and intercepts the original kernel callback to the service, and doesn't let "sendmsg" return values until the callback is complete. This allows us to easily plug in "ZeroCopy send" without changing the original model. Meanwhile, the ByteDance Kernel Team also implements ZeroCopy based on Unix domain socket, which enables zero-copy communication between service processes and Mesh sidecar. After using "ZeroCopy send", we can see that the kernel is no longer occupied by copy through perf:
-![image](/img/blog/bytedance_gonet_practice_img/perf2.png)
+![image](/img/blog/bytedance_gonet_practice_img/perf2.png)
In terms of CPU usage, ZeroCopy can save half the cpu of non-ZeroCopy in large package scenarios. ## Delay Caused By Go Scheduling @@ -117,15 +123,17 @@ In terms of CPU usage, ZeroCopy can save half the cpu of non-ZeroCopy in large p In our practice, we found that although our newly written "Netpoll" outperformed the "Go net" library in terms of avg delay, it was generally higher than the "Go net" library in terms of p99 and max delay, and the spikes would be more obvious, as shown in the following figure (Go 1.13, Netpoll + multiplexing in blue, Netpoll + persistent connection in green, Go net library + persistent connection in yellow):
-![image](/img/blog/bytedance_gonet_practice_img/delay.png)
+![image](/img/blog/bytedance_gonet_practice_img/delay.png)
+ +We tried many ways to improve it, but the outcomes were unsatisfactory. Finally, we located that the delay was not caused by the overhead of "Netpoll" itself, but by the scheduling of Go, for example: -We tried many ways to improve it, but the outcomes were unsatisfactory. Finally, we locate that the delay was not caused by the overhead of "Netpoll" itself, but by the scheduling of Go, for example: 1. In "Netpoll", the "SubReactor" itself is also a "goroutine", which is affected by scheduling and cannot be guaranteed to be executed immediately after the "EpollWait" callback, so there would be a delay here. 2. At the same time, since the "SubReactor" used to handle I/O events and the "MainReactor" used to handle connection listening are "goroutines" themselves, it is actually impossible to ensure that these reactors can be executed in parallel under multi-kernel conditions. Even in the most extreme cases, these reactors may be under the same P, and eventually become sequential execution, which cannot take full advantage of multi-kernel; 3. After "EpollWait callback", I/O events are processed serially in the "SubReactor", so the last event may have a long tail problem. 4. In connection multiplexing scenarios, since each connection is bound to a "SubReactor", the delay is entirely dependent on the scheduling of the "SubReactor", resulting in more pronounced spikes. Because Go has specific improvements for the net library in runtime, the net library will not have the above situation. At the same time, the net library is also a "goroutine-per-connection" model, so it ensures that requests can be executed in parallel without interfering with each other. For the above problems, we have two solutions at present: + 1. Modify the Go runtime source code, register a callback in the Go runtime, call EpollWait each time, and pass the fd to the callback execution; 2.
Work with the ByteDance Kernel Team to support simultaneous batch read/write of multiple connections to solve sequential problems. In addition, in our tests, Go 1.14 reduces the latency slightly lower and smoother, but the max QPS that can be achieved is lower. I hope our ideas can provide some references to peers in the industry who also encountered this problem. @@ -139,6 +147,3 @@ We hope the above sharing can be helpful to the community. At the same time, we - https://golang.org/src/runtime/proc.go - https://github.com/panjf2000/gnet - https://github.com/tidwall/evio - - - diff --git a/content/en/blog/news/open_source_announcement/index.md b/content/en/blog/news/open_source_announcement/index.md index 2bafc3fd53..2f7071e2a9 100644 --- a/content/en/blog/news/open_source_announcement/index.md +++ b/content/en/blog/news/open_source_announcement/index.md @@ -1,6 +1,7 @@ --- date: 2021-09-13 title: "CloudWeGo Open Source Announcement" +projects: ["CloudWeGo"] linkTitle: "CloudWeGo Open Source Announcement" keywords: ["CloudWeGo", "Open Source", "microservice", "ByteDance"] description: "ByteDance now offers open source through CloudWeGo!" @@ -9,9 +10,9 @@ author: ByteDance Architecture Team ## Background -ByteDance is proud to announce the launch of open source software [CloudWeGo](https://github.com/cloudwego). Focusing on microservice communication and governance, it offers high performance, strong extensibility, and high reliability which enables quick construction of an enterprise-level cloud native architecture. +ByteDance is proud to announce the launch of open source software [CloudWeGo](https://github.com/cloudwego). Focusing on microservice communication and governance, it offers high performance, strong extensibility, and high reliability which enables quick construction of an enterprise-level cloud native architecture. 
-ByteDance uses Golang as its main development language, and supports the reliable communication of tens of thousands of Golang microservices. We are experienced in microservices after practicing under massive traffic, and so we decided to offer open source software in order to enrich the community's ecology. +ByteDance uses Golang as its main development language, and supports the reliable communication of tens of thousands of Golang microservices. We are experienced in microservices after practicing under massive traffic, and so we decided to offer open source software in order to enrich the community's ecology. We have built the CloudWeGo project to gradually open source the internal microservices system and try to make the projects friendly to external users, and our internal projects will also use this open source project as a library for iterative development. CloudWeGo will follow a key principle of maintaining one set of code internally and externally, iterating them as a whole. As we needed to migrate our internal users to open source libraries transparently, we did not initially pursue any publicity. However, it has been gratifying to see Kitex gain 1.2k stars and Netpoll gain 700+ stars within one month organically. @@ -23,13 +24,13 @@ Because the projects under CloudWeGo depend on many internal basic tool librarie ## CloudWeGo -To begin with, the two main projects included within CloudWeGo are the [Kitex](https://github.com/cloudwego/kitex) RPC framework and the [Netpoll](https://github.com/cloudwego/netpoll) network library. We chose not to publicise these projects prematurely, to ensure our open source technologies were ready and had sufficient verification upon launch. +To begin with, the two main projects included within CloudWeGo are the [Kitex](https://github.com/cloudwego/kitex) RPC framework and the [Netpoll](https://github.com/cloudwego/netpoll) network library. 
We chose not to publicise these projects prematurely, to ensure our open source technologies were ready and had sufficient verification upon launch. ### Kitex -Kitex [kaɪt'eks] is a **high-performance** and **strong-extensibility** Golang RPC framework used in Bytedance. Before Kitex, the internal Golang framework was Kite, which was strongly coupled with Thrift - the code generation part of which covered intricate logic in the code. +Kitex [kaɪt'eks] is a **high-performance** and **strong-extensibility** Golang RPC framework used in Bytedance. Before Kitex, the internal Golang framework was Kite, which was strongly coupled with Thrift - the code generation part of which covered intricate logic in the code. -Due to these factors, it was difficult to optimize the framework from the network model or codec level. +Due to these factors, it was difficult to optimize the framework from the network model or codec level. Adding new features will inevitably lead to more bloated code and would have hindered the iteration process. Instead we designed a new framework, Kitex, to address these concerns. Although Kitex is a new framework, it has been applied online internally for more than a year. At present, more than 50% of Golang microservices in Bytedance use Kitex. @@ -75,7 +76,7 @@ Netpoll has been designed to solve these problems. It draws inspiration from the ### Thriftgo -Thriftgo is an implementation of [thrift](https://thrift.apache.org/docs/idl) compiler in go language that supports complete syntax and semantic checking of Thrift IDL. +Thriftgo is an implementation of [thrift](https://thrift.apache.org/docs/idl) compiler in go language that supports complete syntax and semantic checking of Thrift IDL. Compared with the official Golang code generation by Apache Thrift, Thriftgo made some bug fixes and supports a plugin mechanism. Users can customize the generated code according to their needs. @@ -83,8 +84,7 @@ Thriftgo is the code generation tool of Kitex. 
CloudWeGo will soon opensource ** Although Thriftgo currently only supports the generation of Golang Thrift code, it is positioned to support Thrift code generation in various languages. If there is a need in future, we will also consider supporting code generation for other programming languages. At the same time, we will try to contribute Thriftgo to the Apache Thrift community. - -## Maintenance +## Maintenance A complete microservice system builds upon a basic cloud ecosystem. No matter how the microservices are developed; based on the public cloud, a private cloud or your own infrastructure, additional services (including service governance platform, monitoring, tracing, service registry and discovery, configuration and service mesh etc) and some customized standards are needed to provide better service governance. At Bytedance we have complete internal services to support the microservice system, but these services cannot be open source in the short term. So, how will CloudWeGo maintain a set of code internally and externally, and iterate them as a whole? @@ -94,24 +94,23 @@ Kitex's code is split into two parts, including the core of Kitex which has been For open source users who use Kitex, they can also extend Kitex and integrate Kitex into their own microservice system. We hope, and expect, that more developers will contribute their own extensions to [kitex-contrib](https://github.com/kitex-contrib), providing help and convenience for more users. -## Future directions +## Future directions - - Open source other internal projects +- Open source other internal projects We will continue to open source other internal projects, such as HTTP framework **Hertz**, shared memory-based IPC communication library **ShmIPC** and others, to provide more support for microservice scenarios. - - Open source verified and stable features +- Open source verified and stable features The main projects of CloudWeGo provide support for internal microservices of Bytedance. 
New features are usually verified internally, and we will gradually open source them when they are relatively mature, such as the integration of **ShmIPC**, **no serialization**, and **no code generation**. - - Combine internal and external needs and iterate +- Combine internal and external needs and iterate After launching open source software, in addition to supporting internal users we also hope that CloudWeGo can provide good support for external users and help everyone quickly build their own microservice system. As such, we will iterate based on the needs of both internal and external users. -Following initial feedback, users have shown a stronger demand for Protobuf. Although Kitex supports multiple protocols, the internal RPC communication protocol of Bytedance is Thrift. Protobuf, Kitex Protobuf or compatibility with gRPC is supported only to fulfill the needs of a small number of internal users, so performance [for Protobuf] has not been optimized yet. In terms of code generation, we have not made any optimizations, and currently utilize Protobuf's official binary directly. +Following initial feedback, users have shown a stronger demand for Protobuf. Although Kitex supports multiple protocols, the internal RPC communication protocol of Bytedance is Thrift. Protobuf, Kitex Protobuf or compatibility with gRPC is supported only to fulfill the needs of a small number of internal users, so performance [for Protobuf] has not been optimized yet. In terms of code generation, we have not made any optimizations, and currently utilize Protobuf's official binary directly. -Gogo/protobuf is an excellent open source library that optimizes Protobuf serialization performance based on generated code, but unfortunately the library is currently out of maintenance, which is why Kitex did not choose gogo. 
+Gogo/protobuf is an excellent open source library that optimizes Protobuf serialization performance based on generated code, but unfortunately the library is currently out of maintenance, which is why Kitex did not choose gogo. In order to meet the growing needs of developers, we are planning to carry out Kitex's performance optimization for Protobuf support. -You are welcome to submit issues and PRs to build CloudWeGo together. We are excited for more developers to join, and also look forward to CloudWeGo helping more and more companies quickly build cloud-native architectures. If any corporate customers want to employ CloudWeGo in your internal projects, we can provide technical support. Feel free to raise an issue in [Github](https://github.com/cloudwego) if you have any questions. - +You are welcome to submit issues and PRs to build CloudWeGo together. We are excited for more developers to join, and also look forward to CloudWeGo helping more and more companies quickly build cloud-native architectures. If any corporate customers want to employ CloudWeGo in your internal projects, we can provide technical support. Feel free to raise an issue in [Github](https://github.com/cloudwego) if you have any questions. diff --git a/content/en/blog/news/open_source_volo/index.md b/content/en/blog/news/open_source_volo/index.md index b369b3c604..9ce13aaa01 100644 --- a/content/en/blog/news/open_source_volo/index.md +++ b/content/en/blog/news/open_source_volo/index.md @@ -1,6 +1,7 @@ --- date: 2022-08-30 title: "China's First Rust-based RPC Framework - Volo is Officially Open Source!" +projects: ["Volo"] linkTitle: "China's first RPC framework based on Rust language - Volo is officially open source!" keywords: ["CloudWeGo", "RPC framework", "Volo", "Rust", "ByteDance Open Source", "open source"] description: "This article introduces ByteDance's official open source Rust RPC framework — Volo, and focuses on the project's origin, main features and related ecosystem." 
diff --git a/content/en/blog/releases/Hertz/_index.md b/content/en/blog/releases/Hertz/_index.md index cfca9040b6..5bc00f1b4e 100644 --- a/content/en/blog/releases/Hertz/_index.md +++ b/content/en/blog/releases/Hertz/_index.md @@ -1,5 +1,6 @@ --- title: "Hertz Release" linkTitle: "Hertz" +projects: ["Hertz"] weight: 2 --- diff --git a/content/en/blog/releases/Hertz/release-v010.md b/content/en/blog/releases/Hertz/release-v010.md index fd906d0942..ec5da28a25 100644 --- a/content/en/blog/releases/Hertz/release-v010.md +++ b/content/en/blog/releases/Hertz/release-v010.md @@ -1,6 +1,7 @@ --- title: "Hertz Release v0.1.0" linkTitle: "Release v0.1.0" +projects: ["Hertz"] date: 2022-06-20 description: > --- diff --git a/content/en/blog/releases/Hertz/release-v020.md b/content/en/blog/releases/Hertz/release-v020.md index 6b7c7c86d3..04439afe2e 100644 --- a/content/en/blog/releases/Hertz/release-v020.md +++ b/content/en/blog/releases/Hertz/release-v020.md @@ -1,6 +1,7 @@ --- title: "Hertz Release v0.2.0" linkTitle: "Release v0.2.0" +projects: ["Hertz"] date: 2022-07-22 description: > --- diff --git a/content/en/blog/releases/Hertz/release-v030.md b/content/en/blog/releases/Hertz/release-v030.md index 352324d4e8..d99cfe5309 100644 --- a/content/en/blog/releases/Hertz/release-v030.md +++ b/content/en/blog/releases/Hertz/release-v030.md @@ -1,6 +1,7 @@ --- title: "Hertz Release v0.3.0" linkTitle: "Release v0.3.0" +projects: ["Hertz"] date: 2022-08-29 description: > --- diff --git a/content/en/blog/releases/Hertz/release-v032.md b/content/en/blog/releases/Hertz/release-v032.md index 2c6ecccf16..356b42f061 100644 --- a/content/en/blog/releases/Hertz/release-v032.md +++ b/content/en/blog/releases/Hertz/release-v032.md @@ -1,6 +1,7 @@ --- title: "Hertz Release v0.3.2" linkTitle: "Release v0.3.2" +projects: ["Hertz"] date: 2022-09-20 description: > --- diff --git a/content/en/blog/releases/Hertz/release-v040.md b/content/en/blog/releases/Hertz/release-v040.md index 
ca25d6e728..e0f915a497 100644 --- a/content/en/blog/releases/Hertz/release-v040.md +++ b/content/en/blog/releases/Hertz/release-v040.md @@ -1,6 +1,7 @@ --- title: "Hertz Release v0.4.0" linkTitle: "Release v0.4.0" +projects: ["Hertz"] date: 2022-10-28 description: > --- diff --git a/content/en/blog/releases/Hertz/release-v050.md b/content/en/blog/releases/Hertz/release-v050.md index fe9b66f24e..ad2574b76b 100644 --- a/content/en/blog/releases/Hertz/release-v050.md +++ b/content/en/blog/releases/Hertz/release-v050.md @@ -1,6 +1,7 @@ --- title: "Hertz Release v0.5.0" linkTitle: "Release v0.5.0" +projects: ["Hertz"] date: 2023-01-12 description: > --- diff --git a/content/en/blog/releases/Hertz/release-v060.md b/content/en/blog/releases/Hertz/release-v060.md index b7222a3ccb..4423c97d04 100644 --- a/content/en/blog/releases/Hertz/release-v060.md +++ b/content/en/blog/releases/Hertz/release-v060.md @@ -1,6 +1,7 @@ --- title: "Hertz Release v0.6.0" linkTitle: "Release v0.6.0" +projects: ["Hertz"] date: 2023-03-02 description: > --- diff --git a/content/en/blog/releases/Kitex/_index.md b/content/en/blog/releases/Kitex/_index.md index 37162be203..8cad80ba7e 100644 --- a/content/en/blog/releases/Kitex/_index.md +++ b/content/en/blog/releases/Kitex/_index.md @@ -1,5 +1,6 @@ --- title: "Kitex Release" linkTitle: "Kitex" +projects: ["Kitex"] weight: 1 --- \ No newline at end of file diff --git a/content/en/blog/releases/Kitex/release-v001.md b/content/en/blog/releases/Kitex/release-v001.md index 05b136a2b4..f30b30ab63 100755 --- a/content/en/blog/releases/Kitex/release-v001.md +++ b/content/en/blog/releases/Kitex/release-v001.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.0.1" linkTitle: "Release v0.0.1" +projects: ["Kitex"] date: 2021-07-12 description: > diff --git a/content/en/blog/releases/Kitex/release-v002.md b/content/en/blog/releases/Kitex/release-v002.md index ce371d1f4d..88475fae03 100644 --- a/content/en/blog/releases/Kitex/release-v002.md +++ 
b/content/en/blog/releases/Kitex/release-v002.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.0.2" linkTitle: "Release v0.0.2" +projects: ["Kitex"] date: 2021-07-30 description: > diff --git a/content/en/blog/releases/Kitex/release-v003.md b/content/en/blog/releases/Kitex/release-v003.md index a3e83c1a85..512ba0f6b7 100644 --- a/content/en/blog/releases/Kitex/release-v003.md +++ b/content/en/blog/releases/Kitex/release-v003.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.0.3" linkTitle: "Release v0.0.3" +projects: ["Kitex"] date: 2021-08-01 description: > diff --git a/content/en/blog/releases/Kitex/release-v004.md b/content/en/blog/releases/Kitex/release-v004.md index eb0fcf58c9..c73139552f 100644 --- a/content/en/blog/releases/Kitex/release-v004.md +++ b/content/en/blog/releases/Kitex/release-v004.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.0.4" linkTitle: "Release v0.0.4" +projects: ["Kitex"] date: 2021-08-26 description: > diff --git a/content/en/blog/releases/Kitex/release-v005.md b/content/en/blog/releases/Kitex/release-v005.md index bccc6c7c07..07d44651e4 100644 --- a/content/en/blog/releases/Kitex/release-v005.md +++ b/content/en/blog/releases/Kitex/release-v005.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.0.5" linkTitle: "Release v0.0.5" +projects: ["Kitex"] date: 2021-09-26 description: > diff --git a/content/en/blog/releases/Kitex/release-v008.md b/content/en/blog/releases/Kitex/release-v008.md index 2c67239ba7..fba5f4901a 100644 --- a/content/en/blog/releases/Kitex/release-v008.md +++ b/content/en/blog/releases/Kitex/release-v008.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.0.8" linkTitle: "Release v0.0.8" +projects: ["Kitex"] date: 2021-11-05 description: > --- diff --git a/content/en/blog/releases/Kitex/release-v010.md b/content/en/blog/releases/Kitex/release-v010.md index ac902f4a68..741f60a96c 100644 --- a/content/en/blog/releases/Kitex/release-v010.md +++ b/content/en/blog/releases/Kitex/release-v010.md @@ -1,6 +1,7 @@ --- title: "Kitex 
Release v0.1.0" linkTitle: "Release v0.1.0" +projects: ["Kitex"] date: 2021-12-13 description: > diff --git a/content/en/blog/releases/Kitex/release-v012.md b/content/en/blog/releases/Kitex/release-v012.md index 2736dec81a..c0d0b2867d 100644 --- a/content/en/blog/releases/Kitex/release-v012.md +++ b/content/en/blog/releases/Kitex/release-v012.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.1.2" linkTitle: "Release v0.1.2" +projects: ["Kitex"] date: 2021-12-22 description: > diff --git a/content/en/blog/releases/Kitex/release-v013.md b/content/en/blog/releases/Kitex/release-v013.md index d33a98feb1..4726e79e60 100644 --- a/content/en/blog/releases/Kitex/release-v013.md +++ b/content/en/blog/releases/Kitex/release-v013.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.1.3" linkTitle: "Release v0.1.3" +projects: ["Kitex"] date: 2021-12-30 description: > diff --git a/content/en/blog/releases/Kitex/release-v014.md b/content/en/blog/releases/Kitex/release-v014.md index 9051820532..213074a855 100644 --- a/content/en/blog/releases/Kitex/release-v014.md +++ b/content/en/blog/releases/Kitex/release-v014.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.1.4" linkTitle: "Release v0.1.4" +projects: ["Kitex"] date: 2022-01-18 description: > diff --git a/content/en/blog/releases/Kitex/release-v020.md b/content/en/blog/releases/Kitex/release-v020.md index dd18ac3162..26b1b85146 100644 --- a/content/en/blog/releases/Kitex/release-v020.md +++ b/content/en/blog/releases/Kitex/release-v020.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.2.0" linkTitle: "Release v0.2.0" +projects: ["Kitex"] date: 2022-02-24 description: > diff --git a/content/en/blog/releases/Kitex/release-v021.md b/content/en/blog/releases/Kitex/release-v021.md index 0d8b15fd3b..1b5a1955f7 100644 --- a/content/en/blog/releases/Kitex/release-v021.md +++ b/content/en/blog/releases/Kitex/release-v021.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.2.1" linkTitle: "Release v0.2.1" +projects: ["Kitex"] date: 2022-03-24 
description: > diff --git a/content/en/blog/releases/Kitex/release-v030.md b/content/en/blog/releases/Kitex/release-v030.md index 19d07e028a..7f0b750cb8 100644 --- a/content/en/blog/releases/Kitex/release-v030.md +++ b/content/en/blog/releases/Kitex/release-v030.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.3.0" linkTitle: "Release v0.3.0" +projects: ["Kitex"] date: 2022-04-29 description: > diff --git a/content/en/blog/releases/Kitex/release-v032.md b/content/en/blog/releases/Kitex/release-v032.md index e5059a8ee3..f6df7a18e3 100644 --- a/content/en/blog/releases/Kitex/release-v032.md +++ b/content/en/blog/releases/Kitex/release-v032.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.3.2" linkTitle: "Release v0.3.2" +projects: ["Kitex"] date: 2022-06-02 description: > --- diff --git a/content/en/blog/releases/Kitex/release-v040.md b/content/en/blog/releases/Kitex/release-v040.md index bf234eb93e..1dac3614b8 100644 --- a/content/en/blog/releases/Kitex/release-v040.md +++ b/content/en/blog/releases/Kitex/release-v040.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.4.0" linkTitle: "Release v0.4.0" +projects: ["Kitex"] date: 2022-08-26 description: > --- diff --git a/content/en/blog/releases/Kitex/release-v043.md b/content/en/blog/releases/Kitex/release-v043.md index 2a430f1fde..a5554b8b03 100644 --- a/content/en/blog/releases/Kitex/release-v043.md +++ b/content/en/blog/releases/Kitex/release-v043.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.4.3" linkTitle: "Release v0.4.3" +projects: ["Kitex"] date: 2022-11-02 description: > --- diff --git a/content/en/blog/releases/Kitex/release-v050.md b/content/en/blog/releases/Kitex/release-v050.md index 5b56646fae..1aef80ccce 100644 --- a/content/en/blog/releases/Kitex/release-v050.md +++ b/content/en/blog/releases/Kitex/release-v050.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.5.0" linkTitle: "Release v0.5.0" +projects: ["Kitex"] date: 2023-03-08 description: > --- diff --git 
a/content/en/blog/releases/Kitex/release-v052.md b/content/en/blog/releases/Kitex/release-v052.md index 96cd9695e9..a874d1c1b7 100644 --- a/content/en/blog/releases/Kitex/release-v052.md +++ b/content/en/blog/releases/Kitex/release-v052.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.5.3" linkTitle: "Release v0.5.3" +projects: ["Kitex"] date: 2023-04-21 description: > --- diff --git a/content/en/blog/releases/Kitex/release-v060.md b/content/en/blog/releases/Kitex/release-v060.md index 0d05788e00..f74b8db8c7 100644 --- a/content/en/blog/releases/Kitex/release-v060.md +++ b/content/en/blog/releases/Kitex/release-v060.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.6.0" linkTitle: "Release v0.6.0" +projects: ["Kitex"] date: 2023-06-14 description: > --- diff --git a/content/en/blog/releases/Kitex/release-v061.md b/content/en/blog/releases/Kitex/release-v061.md index 60e978fbd0..3158978db2 100644 --- a/content/en/blog/releases/Kitex/release-v061.md +++ b/content/en/blog/releases/Kitex/release-v061.md @@ -1,6 +1,7 @@ --- title: "Kitex Release v0.6.1" linkTitle: "Release v0.6.1" +projects: ["Kitex"] date: 2023-06-19 description: > --- diff --git a/content/en/blog/releases/Netpoll/_index.md b/content/en/blog/releases/Netpoll/_index.md index 07f7480494..116b2bf284 100644 --- a/content/en/blog/releases/Netpoll/_index.md +++ b/content/en/blog/releases/Netpoll/_index.md @@ -1,5 +1,6 @@ --- title: "Netpoll Release" linkTitle: "Netpoll" +projects: ["Netpoll"] weight: 3 --- diff --git a/content/en/blog/releases/Netpoll/release-v004.md b/content/en/blog/releases/Netpoll/release-v004.md index 734a637f0a..58c0b2433d 100644 --- a/content/en/blog/releases/Netpoll/release-v004.md +++ b/content/en/blog/releases/Netpoll/release-v004.md @@ -1,9 +1,9 @@ --- title: "Netpoll Release v0.0.4" linkTitle: "Release v0.0.4" +projects: ["Netpoll"] date: 2021-09-16 description: > - --- ## Improvement: @@ -20,4 +20,3 @@ description: > ## Bugfix: - Set leftover wait read size - diff --git 
a/content/en/blog/releases/Netpoll/release-v010.md b/content/en/blog/releases/Netpoll/release-v010.md index e3a6995fa7..6f054e4588 100644 --- a/content/en/blog/releases/Netpoll/release-v010.md +++ b/content/en/blog/releases/Netpoll/release-v010.md @@ -1,28 +1,28 @@ --- title: "Netpoll Release v0.1.0" linkTitle: "Release v0.1.0" +projects: ["Netpoll"] date: 2021-12-01 description: > - --- ## Improvement -* add mux.ShardQueue to support connection multiplexing -* input at a single LinkBuffer Node to improve performance -* fix waitReadSize logic bug and enhance input trigger -* reduce timeout issues when waitRead and inputAck have competition -* unify and simplify conn locks +- add mux.ShardQueue to support connection multiplexing +- input at a single LinkBuffer Node to improve performance +- fix waitReadSize logic bug and enhance input trigger +- reduce timeout issues when waitRead and inputAck have competition +- unify and simplify conn locks ## Bugfix -* ensure EventLoop object will not be finalized before serve return +- ensure EventLoop object will not be finalized before serve return ## Chore -* update readme -* update issue templates +- update readme +- update issue templates ## Breaking Change -* remove WriteBuffer() returned parameter n +- remove WriteBuffer() returned parameter n diff --git a/content/en/blog/releases/Netpoll/release-v011.md b/content/en/blog/releases/Netpoll/release-v011.md index b3c840ff63..3a8b28edeb 100644 --- a/content/en/blog/releases/Netpoll/release-v011.md +++ b/content/en/blog/releases/Netpoll/release-v011.md @@ -1,19 +1,19 @@ --- title: "Netpoll Release v0.1.1" linkTitle: "Release v0.1.1" +projects: ["Netpoll"] date: 2021-12-09 description: > - --- ## Improvement -* enhance mux shard queue +- enhance mux shard queue ## Bugfix -* book never reset readnode +- book never reset readnode ## Chore -* update readme \ No newline at end of file +- update readme diff --git a/content/en/blog/releases/Netpoll/release-v012.md 
b/content/en/blog/releases/Netpoll/release-v012.md index e0fe7af613..be14d1b483 100644 --- a/content/en/blog/releases/Netpoll/release-v012.md +++ b/content/en/blog/releases/Netpoll/release-v012.md @@ -1,11 +1,11 @@ --- title: "Netpoll Release v0.1.2" linkTitle: "Release v0.1.2" +projects: ["Netpoll"] date: 2021-12-13 description: > - --- ## Hotfix -* check args in LinkBuffer API \ No newline at end of file +- check args in LinkBuffer API diff --git a/content/en/blog/releases/Netpoll/release-v020.md b/content/en/blog/releases/Netpoll/release-v020.md index 9667180c6f..6649a561f5 100644 --- a/content/en/blog/releases/Netpoll/release-v020.md +++ b/content/en/blog/releases/Netpoll/release-v020.md @@ -1,25 +1,25 @@ --- title: "Netpoll Release v0.2.0" linkTitle: "Release v0.2.0" +projects: ["Netpoll"] date: 2022-02-22 description: > - --- ## Improvement -* Feat: on connect callback -* Feat: new conn api - Until -* Feat: support dialing without timeout +- Feat: on connect callback +- Feat: new conn api - Until +- Feat: support dialing without timeout ## Fix -* Fix: trigger close callback if only set the onConnect callback -* Fix: add max node size to prevent OOM -* Fix: FDOperator.reset() not reset op.OnWrite -* Fix: Write panic when conn Close -* Fix: unit tests may fail +- Fix: trigger close callback if only set the onConnect callback +- Fix: add max node size to prevent OOM +- Fix: FDOperator.reset() not reset op.OnWrite +- Fix: Write panic when conn Close +- Fix: unit tests may fail ## Chore -* docs: update readme +- docs: update readme diff --git a/content/en/blog/releases/Netpoll/release-v022.md b/content/en/blog/releases/Netpoll/release-v022.md index d12ef4f92f..f78c4c0f2c 100644 --- a/content/en/blog/releases/Netpoll/release-v022.md +++ b/content/en/blog/releases/Netpoll/release-v022.md @@ -1,29 +1,29 @@ --- title: "Netpoll Release v0.2.2" linkTitle: "Release v0.2.2" +projects: ["Netpoll"] date: 2022-04-28 description: > - --- ## Improvement -* Fix: reduce costs of 
SetNumLoops -* Chore: update mcache and ci -* Feat: recycle caches when LinkBuffer is closed +- Fix: reduce costs of SetNumLoops +- Chore: update mcache and ci +- Feat: recycle caches when LinkBuffer is closed ## Fix -* Fix: send&close ignored by OnRequest -* Fix: fill lost some data when read io.EOF -* Fix: check is active when flush +- Fix: send&close ignored by OnRequest +- Fix: fill lost some data when read io.EOF +- Fix: check is active when flush ## Doc -* Doc: update guide.md -* Doc: restate the definition of Reader.Slice -* Doc: fix replace examples url +- Doc: update guide.md +- Doc: restate the definition of Reader.Slice +- Doc: fix replace examples url ## Revert -* Revert "feat: change default number of loops policy (#31)" +- Revert "feat: change default number of loops policy (#31)" diff --git a/content/en/blog/releases/Netpoll/release-v030.md b/content/en/blog/releases/Netpoll/release-v030.md index b8e683a64c..9ac561fbb6 100644 --- a/content/en/blog/releases/Netpoll/release-v030.md +++ b/content/en/blog/releases/Netpoll/release-v030.md @@ -1,19 +1,19 @@ --- title: "Netpoll Release v0.3.0" linkTitle: "Release v0.3.0" +projects: ["Netpoll"] date: 2022-11-09 description: > --- ## Feat -* [[#206](https://github.com/cloudwego/netpoll/pull/206)] feat: connection flush support write timeout. -* [[#182](https://github.com/cloudwego/netpoll/pull/182)] feat: dial in ipv6 only. +- [[#206](https://github.com/cloudwego/netpoll/pull/206)] feat: connection flush support write timeout. +- [[#182](https://github.com/cloudwego/netpoll/pull/182)] feat: dial in ipv6 only. ## Fix -* [[#200](https://github.com/cloudwego/netpoll/pull/200)] fix: fd not detach when close by user. -* [[#196](https://github.com/cloudwego/netpoll/pull/196)] fix: limit iovecs max to 2GB(2^31). -* [[#179](https://github.com/cloudwego/netpoll/pull/179)] fix: length overflow. -* [[#183](https://github.com/cloudwego/netpoll/pull/183)] fix: dont check epollout when epollerr. 
- +- [[#200](https://github.com/cloudwego/netpoll/pull/200)] fix: fd not detach when close by user. +- [[#196](https://github.com/cloudwego/netpoll/pull/196)] fix: limit iovecs max to 2GB(2^31). +- [[#179](https://github.com/cloudwego/netpoll/pull/179)] fix: length overflow. +- [[#183](https://github.com/cloudwego/netpoll/pull/183)] fix: dont check epollout when epollerr. diff --git a/content/en/blog/releases/Netpoll/release-v040.md b/content/en/blog/releases/Netpoll/release-v040.md index ac32abe498..5b89ff9d8b 100644 --- a/content/en/blog/releases/Netpoll/release-v040.md +++ b/content/en/blog/releases/Netpoll/release-v040.md @@ -1,6 +1,7 @@ --- title: "Netpoll Release v0.4.0" linkTitle: "Release v0.4.0" +projects: ["Netpoll"] date: 2023-06-14 description: > --- diff --git a/content/en/blog/releases/Volo/_index.md b/content/en/blog/releases/Volo/_index.md index 9fab39d7c4..f63f18d104 100644 --- a/content/en/blog/releases/Volo/_index.md +++ b/content/en/blog/releases/Volo/_index.md @@ -1,6 +1,7 @@ --- title: "Volo Release" linkTitle: "Volo" +projects: ["Volo"] weight: 4 --- diff --git a/content/en/blog/releases/Volo/release-v020.md b/content/en/blog/releases/Volo/release-v020.md index baf856be57..e8534b3a1b 100644 --- a/content/en/blog/releases/Volo/release-v020.md +++ b/content/en/blog/releases/Volo/release-v020.md @@ -1,6 +1,7 @@ --- title: "Volo Release v0.2.0" linkTitle: "Release v0.2.0" +projects: ["Volo"] date: 2022-10-18 description: > --- diff --git a/content/en/blog/releases/Volo/release-v021.md b/content/en/blog/releases/Volo/release-v021.md index 8f87812e10..b684e8f548 100644 --- a/content/en/blog/releases/Volo/release-v021.md +++ b/content/en/blog/releases/Volo/release-v021.md @@ -1,6 +1,7 @@ --- title: "Volo Release v0.2.1" linkTitle: "Release v0.2.1" +projects: ["Volo"] date: 2022-10-26 description: > --- diff --git a/content/en/blog/releases/Volo/release-v030.md b/content/en/blog/releases/Volo/release-v030.md index e7912757a7..ce7c0ff1e8 100644 --- 
a/content/en/blog/releases/Volo/release-v030.md +++ b/content/en/blog/releases/Volo/release-v030.md @@ -1,6 +1,7 @@ --- title: "Volo Release 0.3.0" linkTitle: "Release 0.3.0" +projects: ["Volo"] date: 2022-12-22 description: > --- diff --git a/content/en/blog/releases/Volo/release-v032.md b/content/en/blog/releases/Volo/release-v032.md index 6599687e78..cbdf40f915 100644 --- a/content/en/blog/releases/Volo/release-v032.md +++ b/content/en/blog/releases/Volo/release-v032.md @@ -1,6 +1,7 @@ --- title: "Volo Release 0.3.2" linkTitle: "Release 0.3.2" +projects: ["Volo"] date: 2023-02-07 description: > --- diff --git a/content/en/blog/releases/Volo/release-v041.md b/content/en/blog/releases/Volo/release-v041.md index f1eb47222e..1c1aa856e8 100644 --- a/content/en/blog/releases/Volo/release-v041.md +++ b/content/en/blog/releases/Volo/release-v041.md @@ -1,6 +1,7 @@ --- title: 'Volo Release 0.4.1' linkTitle: 'Release v0.4.1' +projects: ["Volo"] date: 2023-03-20 description: > --- diff --git a/content/en/blog/releases/_index.md b/content/en/blog/releases/_index.md index b1d9eb4ff3..9349f60273 100644 --- a/content/en/blog/releases/_index.md +++ b/content/en/blog/releases/_index.md @@ -1,6 +1,7 @@ --- title: "New Releases" +projects: [] linkTitle: "Releases" weight: 20 --- diff --git a/content/en/docs/hertz/tutorials/basic-feature/json.md b/content/en/docs/hertz/tutorials/basic-feature/json.md index d96cfd0b54..bbd45ba882 100644 --- a/content/en/docs/hertz/tutorials/basic-feature/json.md +++ b/content/en/docs/hertz/tutorials/basic-feature/json.md @@ -49,4 +49,3 @@ go build -tags stdjson ## Sonic related issues If there are issues related to Sonic, please refer to Sonic [README](https://github.com/bytedance/sonic) or propose an [issue](https://github.com/bytedance/sonic/issues). 
- diff --git a/content/en/docs/hertz/tutorials/basic-feature/protocol/http3.md b/content/en/docs/hertz/tutorials/basic-feature/protocol/http3.md index b5bc1d16ae..30760ac576 100644 --- a/content/en/docs/hertz/tutorials/basic-feature/protocol/http3.md +++ b/content/en/docs/hertz/tutorials/basic-feature/protocol/http3.md @@ -42,10 +42,10 @@ h.AddProtocol(suite.HTTP3, factory.NewServerFactory(&http3.Option{})) | **Option** | **Description** | | :----------------- | -------------------------------------------- | | WithTransport | Set the network layer implementation | -| WithAltTransport | Set the alternative network layer implementation. The AltTransporter will be used for parallel listening - both in `TCP` and `QUIC` | -| WithALPN | Set whether to enable `ALPN` | +| WithAltTransport | Set the alternative network layer implementation. The AltTransporter will be used for parallel listening - both in `TCP` and `QUIC` | +| WithALPN | Set whether to enable `ALPN` | | WithTLS | Set `TLS` Config | -| WithHostPorts | Set the host and port for starting the service | +| WithHostPorts | Set the host and port for starting the service | ## Sample Code @@ -236,5 +236,3 @@ func main() { wg.Wait() } ``` - - diff --git a/content/zh/blog/_index.md b/content/zh/blog/_index.md index 1e068f52f9..265e89e1f6 100644 --- a/content/zh/blog/_index.md +++ b/content/zh/blog/_index.md @@ -1,6 +1,7 @@ --- title: "博客" linkTitle: "博客" +projects: [] menu: main: weight: 20 diff --git a/content/zh/blog/news/1st_RPCKitex/index.md b/content/zh/blog/news/1st_RPCKitex/index.md index 9516800d42..a8db71094e 100644 --- a/content/zh/blog/news/1st_RPCKitex/index.md +++ b/content/zh/blog/news/1st_RPCKitex/index.md @@ -1,6 +1,7 @@ --- date: 2022-09-20 title: "高性能 RPC 框架 CloudWeGo-Kitex 内外统一的开源实践" +projects: ["Kitex"] linkTitle: "高性能 RPC 框架 CloudWeGo-Kitex 内外统一的开源实践" keywords: ["Kitex", "CloudWeGo", "RPC", "开源", "Kite", "Golang", "Thrift", "Protobuf", "gRPC", "xDS"] description: "本文介绍了高性能 RPC 框架 CloudWeGo-Kitex 
的起源与发展历史,以及开源一年以来的功能特性变更、社区共建生态成果、企业落地实践等方面。" diff --git a/content/zh/blog/news/1st_httpHertz/index.md b/content/zh/blog/news/1st_httpHertz/index.md index 2aa535609e..2b52d33ee9 100644 --- a/content/zh/blog/news/1st_httpHertz/index.md +++ b/content/zh/blog/news/1st_httpHertz/index.md @@ -1,6 +1,7 @@ --- date: 2022-09-27 title: "助力字节降本增效,大规模企业级 HTTP 框架 Hertz 设计实践" +projects: ["Hertz"] linkTitle: "助力字节降本增效,大规模企业级 HTTP 框架 Hertz 设计实践" keywords: ["Hertz", "HTTP", "Golang", "Gin", "高性能","可扩展"] description: "本文描述了字节跳动内部的大规模企业级 HTTP 框架 Hertz 的设计实践,包括 Hertz 的项目起源、架构设计、功能特性,性能表现等方面。" diff --git a/content/zh/blog/news/CloudWeGo_helps_NextArch/index.md b/content/zh/blog/news/CloudWeGo_helps_NextArch/index.md index f7b9159a19..1d8272accb 100644 --- a/content/zh/blog/news/CloudWeGo_helps_NextArch/index.md +++ b/content/zh/blog/news/CloudWeGo_helps_NextArch/index.md @@ -1,6 +1,7 @@ --- date: 2022-04-01 title: "CloudWeGo 助 NextArch 基金会推动标准化建设" +projects: ["CloudWeGo"] linkTitle: "CloudWeGo 助 NextArch 基金会推动标准化建设" keywords: ["CloudWeGo", "NextArch", "微服务", "标准化建设"] description: "CloudWeGo 加入 NextArch 基金会微服务技术小组,推动微服务技术和开源生态的持续发展,针对不同行业和应用场景输出标准化解决方案。" @@ -52,4 +53,3 @@ author: NextArch 字节跳动微服务架构师,CloudWeGo 开源负责人。CloudWeGo 是一套由字节跳动开源的、可快速构建企业级云原生架构的中间件集合,专注于解决微服务通信与治理的难题,具备高性能、可扩展、高可靠的特点。 微服务技术发展至今,业界涌现出一大批微服务开发框架、技术和最佳实践。这个多样化是不可避免的,没有一个微服务开发框架能够统一所有的语言,但是微服务架构里面所涉及的服务治理体系,却可以做到统一和规范化。NextArch 微服务 SIG 正是在这样的背景下诞生了,旨在提供统一服务治理体系,解决共性问题,将促进微服务框架和技术的进一步演进和发展。 - diff --git a/content/zh/blog/news/Evolution_Kitex_HighPerformance/index.md b/content/zh/blog/news/Evolution_Kitex_HighPerformance/index.md index 8810b62fd3..3cf7929a0c 100644 --- a/content/zh/blog/news/Evolution_Kitex_HighPerformance/index.md +++ b/content/zh/blog/news/Evolution_Kitex_HighPerformance/index.md @@ -1,13 +1,14 @@ --- date: 2022-05-19 title: "字节微服务框架的挑战和演进" +projects: ["Kitex"] linkTitle: "字节微服务框架的挑战和演进" -keywords: ["微服务框架","字节跳动", "Kitex", "Kite", "Go"] +keywords: ["微服务框架", "字节跳动", "Kitex", "Kite", "Go"] 
description: "本文介绍了随着字节跳动内部业务的快速发展,字节微服务框架面临的挑战,以及为了应对这些挑战做了哪些演进,最后介绍了面向开源的 Kitex 的功能特性和内部落地的经典案例等。" author: lsjbd --- -# 字节微服务框架的挑战和演进 +## 字节微服务框架的挑战和演进 2014 年以来,字节跳动内部业务的快速发展,推动了长连接推送服务,它们面临着高并发的业务需求问题,对性能和开发效率都有很高要求。当时的业务,大部分都是由 Python 开发,难以应对新出现的问题。 项目负责人在一众现存的技术栈中选择了 Golang 这一门新兴的编程语言,快速解决了性能和开发效率的问题。随后,字节跳动内部开始逐渐推广使用 Golang 进行服务开发。 @@ -20,11 +21,12 @@ author: lsjbd 直到 2021 年年中,字节跳动内部已有 2w+ 服务使用了 [Kitex][Kitex]。因此,我们决定全面优化 [Kitex][Kitex],将其实践成果进行开源,反馈给开源社区。 ![image](/img/blog/Evolution_Kitex_High-performance/GolangRPC.png) +

字节跳动 Golang RPC 框架的演进

-## Kite 的缺陷 +### Kite 的缺陷 Kite 作为字节跳动第一代 Golang RPC 框架,主要存在以下缺陷: @@ -36,33 +38,33 @@ Kite 作为字节跳动第一代 Golang RPC 框架,主要存在以下缺陷: 因此,业务的快速发展和需求场景的多样化,催生了新一代 Golang RPC 框架 [Kitex][Kitex]。 -## Kitex +### Kitex [Kitex][Kitex] 的架构主要包括四个部分:Kitex Tool、Kitex Core、Kitex Byted、Second Party Pkg。 -* Kitex Core 是一个携带了一套微服务治理功能的 RPC 框架,它是 [Kitex][Kitex] 的核心部分。 -* Kitex Byted 是一套结合了字节跳动内部基础设施的拓展集合。通过这一套拓展集合,[Kitex][Kitex] 能够在内部支持业务的发展。 -* Kitex Tool 是一个命令行工具,能够在命令行生成我们的代码以及服务的脚手架,可以提供非常便捷的开发体验。 -* Second Party Pkg,例如 [Netpoll][Netpoll], Netpoll-http2,是 [Kitex][Kitex] 底层的网络库,这两个库也开源在 CloudWeGo 组织中。 +- Kitex Core 是一个携带了一套微服务治理功能的 RPC 框架,它是 [Kitex][Kitex] 的核心部分。 +- Kitex Byted 是一套结合了字节跳动内部基础设施的拓展集合。通过这一套拓展集合,[Kitex][Kitex] 能够在内部支持业务的发展。 +- Kitex Tool 是一个命令行工具,能够在命令行生成我们的代码以及服务的脚手架,可以提供非常便捷的开发体验。 +- Second Party Pkg,例如 [Netpoll][Netpoll], Netpoll-http2,是 [Kitex][Kitex] 底层的网络库,这两个库也开源在 CloudWeGo 组织中。 ![image](/img/blog/Evolution_Kitex_High-performance/Architecture_design.png) +

Kitex 的架构设计

总的来说, [Kitex][Kitex] 主要有五个特点:面向开源、功能丰富、灵活可拓展、支持多协议、高性能。 -## 面向开源 +### 面向开源 由于之前已经体验过了 Kite 维护的各种问题,我们在立项之初就考虑到了未来可能会开源 [Kitex][Kitex]。因此,我们设计的第一个宗旨就是不将 [Kitex][Kitex] 和公司内部的基础设施进行强耦合或者硬编码绑定。 Kitex Core 是一个非常简洁的框架,公司内部的所有基础设施都以拓展的方式注入到 Kitex Core 里。即使我们现在已经开源了,它也以这种形式存在。 公司内部基础设施的更新换代,和 [Kitex][Kitex] 自身的迭代是相互独立的,这对于业务来说是非常好的体验。同时,在 [Kitex][Kitex] 的接口设计上,我们使用了 Golang 经典的 Option 模式, 它是可变参数,通过 Option 能够提供各种各样的功能,这为我们的开发和业务的使用都带来了非常大的灵活性。 -## Kitex 的功能特性 - -### 治理能力 +### Kitex 的功能特性 +#### 治理能力 [Kitex][Kitex] 内置了丰富的服务治理能力,例如超时熔断、重试、负载均衡、泛化调用、数据透传等功能。业务或者外部的用户使用 [Kitex][Kitex] 都是可以开箱即用的。 如果你有非常特殊的需求,你也可以通过我们的注入点去进行定制化操作,比如你可以自定义中间件去过滤或者拦截请求,定义跟踪器去注入日志、去注入服务发现等。 @@ -81,37 +83,40 @@ Kitex Core 是一个非常简洁的框架,公司内部的所有基础设施都 业务方使用时,不需要感知很多东西去配置,只需要添加一个 Suite 就足够了,这点非常方便一些中台方或者第三方去做定制。 ![image](/img/blog/Evolution_Kitex_High-performance/Suite.png) +

示例

-### 多协议 +#### 多协议 [Kitex][Kitex] 网络层基于高性能网络库 [Netpoll][Netpoll] 实现。在 [Netpoll][Netpoll] 上,我们构建了 Thrift 和 Netpoll-http2;在 Thrift 上,我们还做了一些特殊的定制,例如,支持 Thrift 的泛化调用,还有基于 Thrift 的连接多路复用。 ![image](/img/blog/Evolution_Kitex_High-performance/Multi-protocol.png) +

多协议

-### 代码生成工具 +#### 代码生成工具 和 [Kitex][Kitex] 一同出现的,还有我们开发的一个简单易用的命令行工具 kitex。如果我们写了一个 IDL,只需要提供一个 module 参数和一个服务名称,kitex 就会为你生成服务代码脚手架。 目前 [Kitex][Kitex] 支持了 Protobuf 和 Thrift 这两种 IDL 的定义。命令行工具内置丰富的选项,可以进行项目代码定制;同时,它底层依赖 Protobuf 官方的编译器,和我们自研的 Thriftgo 的编译器,两者都支持自定义的生成代码插件。 -## Kitex 的性能表现 +### Kitex 的性能表现 字节跳动内部 RPC 框架使用的协议主要都是基于 Thrift,所以我们在 Thrift 上深耕已久。结合自研的 [Netpoll][Netpoll] 能力,它可以直接暴露底层连接的 buffer。 在此基础上,我们设计出了 FastRead/FastWrite 编解码实现,测试发现它具有远超过 apache thrift 生成代码的性能。整体而言,[Kitex][Kitex] 的性能相当不错,今年 1 月份的数据如下图所示, 可以看到,[Kitex][Kitex] 在使用 Thrift 作为 Payload 的情况下,性能优于官方 gRPC,吞吐接近 gRPC 的两倍;此外,在 [Kitex][Kitex] 使用定制的 Protobuf 协议时,性能也优于 gRPC。 ![image](/img/blog/Evolution_Kitex_High-performance/Kitex_gRPC.png) +

Kitex/gRPC 性能对比(2022 年 1 月数据)

-## Kitex:一个 demo +### Kitex:一个 demo 下面简单演示一下 [Kitex][Kitex] 是如何开发一个服务的。 @@ -119,6 +124,7 @@ Kitex/gRPC 性能对比(2022 年 1 月数据) 编写完这个 demo.thrift 文件之后,就可以使用 [Kitex][Kitex] 在命令行生成指定的生成代码。如图所示,只需要传入 module name,service name 和目标 IDL 就行了。 ![image](/img/blog/Evolution_Kitex_High-performance/IDL.png) +

定义 IDL

@@ -127,11 +133,13 @@ Kitex/gRPC 性能对比(2022 年 1 月数据) 接下来需要通过 go mod tidy 把依赖拉下来,然后用 build.sh 构建,就可以启动服务了。[Kitex][Kitex] 默认的接听端口是 8888。 ![image](/img/blog/Evolution_Kitex_High-performance/Handler.png) +

定义 Handler 方法

![image](/img/blog/Evolution_Kitex_High-performance/Compile_run.png) +

编译、运行

@@ -140,24 +148,26 @@ Kitex/gRPC 性能对比(2022 年 1 月数据) 这里同样是 import 刚刚生成的生成代码,创建 Client、指定服务名字、构成相应的参数,填上“ Hello,word!” ,然后就可以调用了。 ![image](/img/blog/Evolution_Kitex_High-performance/Client.png) +

编写 Client

-# Kitex 在字节内部的落地 +## Kitex 在字节内部的落地 -## 与内部基础设施的集成 +### 与内部基础设施的集成 谈到落地,第一步就是 [Kitex][Kitex] 和字节跳动内部的基础设施进行结合。字节跳动内部的所有基础设施都是以依赖的方式注入到 [Kitex][Kitex] 的。 我们将日志、监控、tracing 都定义为 tracer,然后通过 WithTracer 这个 Option 将其注入到 [Kitex][Kitex] 里;服务发现是 WithResolver;Service Mesh 则是 WithProxy 等。 字节跳动内部的基础设施都是通过 Option 被注入到 [Kitex][Kitex] 的,而且所有的 Option 都是通过前面说的 Suite 打包,简单地添加到业务的代码里完成。 ![image](/img/blog/Evolution_Kitex_High-performance/Integration.png) +

与内部基础设施的集成

-## 内部落地的经典案例:合并部署 +### 内部落地的经典案例:合并部署 这里介绍一个内部落地的经典案例:合并部署。其背景是,在开发微服务时,由于业务拆分和业务场景的多样化,微服务容易出现过微的情况。 当服务数量越来越多,网络传输和序列化开销就会越来越大,变得不可忽视。因此,[Kitex][Kitex] 框架需要考虑如何减小网络传输和序列化的开销。 @@ -172,22 +182,23 @@ Kitex/gRPC 性能对比(2022 年 1 月数据) 那么,它的效果如何呢?在 2021 年的实践过程中,我们对抖音的某个服务约 30% 的流量进行了合并,服务端的 CPU 的消耗减少了 19%, TP99 延迟下降到 29%,效果相当显著。 ![image](/img/blog/Evolution_Kitex_High-performance/Merge_deployment.png) +

内部落地的经典案例:合并部署

-## 微服务框架推进的痛点 +### 微服务框架推进的痛点 -* 升级慢 +- 升级慢 大家可能好奇 [Kitex][Kitex] 在字节跳动内部推广是不是很顺畅?其实并不是。作为一个相对而言比较新的框架, [Kitex][Kitex] 和其它新生项目一样,在推广的过程中都会遇到同样的问题。 特别是, [Kitex][Kitex] 作为一个 RPC 框架,我们提供给用户的其实是一个代码的 SDK, 我们的更新是需要业务方的用户去感知、升级、部署上线,才能最终体现在他们的服务逻辑里,因此具有升级慢的问题。 -* 召回慢 +- 召回慢 同时,因为代码都是由研发人员编写,如果代码出现了 bug,我们就需要及时地去感知定位问题,通知负责人去更新版本。因此,会有召回慢的问题。 -* 问题排查困难 +- 问题排查困难 业务方的用户在写代码时,他们其实往往关注的是自己的业务逻辑,他们不会深入理解一个框架内部的实现。所以如果出现问题,他们往往会不知所措,需要依赖我们的业务同学才能进行相应的问题排查。所以会有问题排查困难的问题。 @@ -207,31 +218,31 @@ Kitex/gRPC 性能对比(2022 年 1 月数据) ![image](/img/blog/Evolution_Kitex_High-performance/Number_of_AccessServices.png) -# Kitex 的开源实践 +## Kitex 的开源实践 开源工作主要包括代码、文档和社区运营三个层面。 **代码层面** -* 代码拆分、脱敏; -* 内部仓库引用开源仓库,避免内外多副本同时维护; -* 在开源过程中确保内部用户平滑切换、体验无损; +- 代码拆分、脱敏; +- 内部仓库引用开源仓库,避免内外多副本同时维护; +- 在开源过程中确保内部用户平滑切换、体验无损; **文档层面** -* 重新梳理用户文档,覆盖方方面面; -* 建立详尽的用例仓库(CloudWeGo/Kitex-examples)。 +- 重新梳理用户文档,覆盖方方面面; +- 建立详尽的用例仓库(CloudWeGo/Kitex-examples)。 **社区运营** -* 官网建设; -* 组建用户群,进行答疑解惑; -* 飞书机器人对接 Github 的 Issue 管理、PR 管理之类的业务,可以快速响应; -* 对优秀贡献者进行奖励。 +- 官网建设; +- 组建用户群,进行答疑解惑; +- 飞书机器人对接 Github 的 Issue 管理、PR 管理之类的业务,可以快速响应; +- 对优秀贡献者进行奖励。 在以上努力下,[CloudWeGo/Kitex](https://github.com/cloudwego/kitex) 仓库目前收获了 4.1k+ stars;[kitex-contrib](https://github.com/kitex-contrib) 获得多个外部用户贡献的仓库;CloudWeGo 飞书用户群近 950 个用户…… -# 未来展望 +## 未来展望 首先,我们仍然会持续向开源社区反馈最新的技术进展。例如在 Thrift 协议上,虽然对 Thrift 的编解码已经做到非常极致的优化了,我们还在探索利用 JIT 手段来提供更多的性能提升; 在 Protobuf 上,我们会补足短板,将在 Thrift 方面的优化经验迁移到 Protobuf 上,对 Protobuf 的生成代码和编解码进行优化; [Kitex][Kitex] 后续也会进一步融入云原生社区,所以也在考虑支持 xDS 协议。 diff --git a/content/zh/blog/news/Go_HTTP_Hertz_Design/index.md b/content/zh/blog/news/Go_HTTP_Hertz_Design/index.md index 5b4e24e310..95a4e99829 100644 --- a/content/zh/blog/news/Go_HTTP_Hertz_Design/index.md +++ b/content/zh/blog/news/Go_HTTP_Hertz_Design/index.md @@ -1,6 +1,7 @@ --- date: 2022-06-21 title: "字节跳动开源 Go HTTP 框架 Hertz 设计实践" +projects: ["Hertz"] linkTitle: "字节跳动开源 Go HTTP 框架 Hertz 设计实践" keywords: ['Go', 'HTTP', 'Hertz', '架构设计', "功能特性"] description: "本文介绍了字节跳动开源 
Go HTTP 框架 Hertz 的项目起源、架构设计、功能特性以及性能表现。" diff --git a/content/zh/blog/news/Hertz_Benchmark/index.md b/content/zh/blog/news/Hertz_Benchmark/index.md index 7b2cdfbe02..5905095e52 100644 --- a/content/zh/blog/news/Hertz_Benchmark/index.md +++ b/content/zh/blog/news/Hertz_Benchmark/index.md @@ -1,6 +1,7 @@ --- date: 2023-02-24 title: "HTTP 框架 Hertz 实践入门:性能测试指南" +projects: ["Hertz"] linkTitle: "HTTP 框架 Hertz 实践入门:性能测试指南" keywords: ["CloudWeGo", "Hertz", "HTTP 框架", "性能测试"] description: "本文旨在分享开发者在压测 Hertz 需要了解的场景和技术问题,并且基于当前最新版本对多个框架进行了压测对比,提供了性能参考数据,有助于用户更好地结合真实 HTTP 场景对 Hertz 进行调优,使之更贴合业务需要、发挥最佳性能。" diff --git a/content/zh/blog/news/Hertz_Open_Source/index.md b/content/zh/blog/news/Hertz_Open_Source/index.md index 7ca381c09c..e34fbe9f0b 100644 --- a/content/zh/blog/news/Hertz_Open_Source/index.md +++ b/content/zh/blog/news/Hertz_Open_Source/index.md @@ -1,6 +1,7 @@ --- date: 2022-06-21 title: "超大规模的企业级微服务 HTTP 框架 — Hertz 正式开源!" +projects: ["Hertz"] linkTitle: "超大规模的企业级微服务 HTTP 框架 — Hertz 正式开源!" 
keywords: ["hertz", "微服务", "http", "go", "golang", "开源"] description: "本文介绍了字节跳动正式开源超大规模的企业级微服务 HTTP 框架 — Hertz。" diff --git a/content/zh/blog/news/Hertz_Stream_Based_Design/index.md b/content/zh/blog/news/Hertz_Stream_Based_Design/index.md index 478de2a9cc..98ba7a8ad3 100644 --- a/content/zh/blog/news/Hertz_Stream_Based_Design/index.md +++ b/content/zh/blog/news/Hertz_Stream_Based_Design/index.md @@ -1,6 +1,7 @@ --- date: 2023-08-02 title: "Hertz 支持 QUIC & HTTP/3" +projects: ["Hertz"] linkTitle: "Hertz 支持 QUIC & HTTP/3" keywords: ['Go', 'Hertz', 'QUIC', 'HTTP/3', '接口设计'] description: "本文介绍了 Hertz 为支持 QUIC & HTTP/3 在网络传输层和协议层提供的接口设计方案。" diff --git a/content/zh/blog/news/Kitex_Proxyless_OpenTelemetry/index.md b/content/zh/blog/news/Kitex_Proxyless_OpenTelemetry/index.md index 47ca0cb488..b1d9619875 100644 --- a/content/zh/blog/news/Kitex_Proxyless_OpenTelemetry/index.md +++ b/content/zh/blog/news/Kitex_Proxyless_OpenTelemetry/index.md @@ -1,6 +1,7 @@ --- date: 2022-11-08 title: "Kitex Proxyless 之流量路由:配合 Istio 与 OpenTelemetry 实现全链路泳道" +projects: ["Kitex"] linkTitle: "Kitex Proxyless 之流量路由:配合 Istio 与 OpenTelemetry 实现全链路泳道" keywords: ["CloudWeGo", "Proxyless", "流量路由", "全链路泳道", "Bookinfo"] description: "本文主要介绍了基于 Kitex Proxyless 实现流量路由,从而在 biz-demo 中使用 Kitex 和 Hertz 重写 bookinfo 项目,实现的目的是以实战的方式演示如何使用 xDS 实现全链路的流量泳道。" diff --git a/content/zh/blog/news/Kitex_perf_optimize_practices/index.md b/content/zh/blog/news/Kitex_perf_optimize_practices/index.md index 0fc4497548..c48c495412 100644 --- a/content/zh/blog/news/Kitex_perf_optimize_practices/index.md +++ b/content/zh/blog/news/Kitex_perf_optimize_practices/index.md @@ -1,6 +1,7 @@ --- date: 2021-09-23 title: "字节跳动 Go RPC 框架 Kitex 性能优化实践" +projects: ["Kitex"] linkTitle: "字节跳动 Go RPC 框架 Kitex 性能优化实践" keywords: ["Kitex", "性能优化", "Netpoll", "Thrift", "序列化"] description: "本文介绍了字节跳动 Go RPC 框架 Kitex 的性能优化实践,包括 Netpoll、Thrift、序列化等方面的优化。" diff --git a/content/zh/blog/news/Kitex_performance_testing/index.md 
b/content/zh/blog/news/Kitex_performance_testing/index.md index f635db8ef8..a2ef44416d 100644 --- a/content/zh/blog/news/Kitex_performance_testing/index.md +++ b/content/zh/blog/news/Kitex_performance_testing/index.md @@ -1,6 +1,7 @@ --- date: 2021-11-24 title: "RPC 框架 Kitex 实践入门:性能测试指南" +projects: ["Kitex"] linkTitle: "RPC 框架 Kitex 实践入门:性能测试指南" keywords: ["Kitex", "性能测试", "压测", "RPC"] description: "本文介绍了如何使用 Kitex 进行性能测试,以及如何分析测试结果,有助于用户更好地结合真实 RPC 场景对 Kitex 进行调优,使之更贴合业务需要、发挥最佳性能。" diff --git a/content/zh/blog/news/Microservices_OpenSource_CloudWeGo/index.md b/content/zh/blog/news/Microservices_OpenSource_CloudWeGo/index.md index ef0b71b776..629c282674 100644 --- a/content/zh/blog/news/Microservices_OpenSource_CloudWeGo/index.md +++ b/content/zh/blog/news/Microservices_OpenSource_CloudWeGo/index.md @@ -1,8 +1,9 @@ --- date: 2022-05-26 title: "从 CloudWeGo 谈云原生时代的微服务与开源" +projects: ["CloudWeGo"] linkTitle: "从 CloudWeGo 谈云原生时代的微服务与开源" -keywords: ["CloudWeGo","微服务","开源"] +keywords: ["CloudWeGo", "微服务", "开源"] description: "本文将从 CloudWeGo 的角度,介绍云原生时代的微服务与开源的关系,以及 CloudWeGo 在微服务开源领域的探索与实践。" author: GuangmingLuo --- @@ -32,10 +33,10 @@ CloudWeGo 是字节跳动基础架构团队开源出来的项目,它是一套 CloudWeGo 在第一阶段开源了四个项目: -* [Kitex][Kitex]:高性能、强可扩展的 Golang RPC 框架 -* [Netpoll][Netpoll]:高性能、I/O 非阻塞、专注于 RPC 场景的网络框架 -* [Thriftgo][Thriftgo]:Golang 实现的 thrift 编译器,支持插件机制和语义检查 -* Netpoll-http2:基于 [Netpoll][Netpoll] 的 HTTP/2 实现 +- [Kitex][Kitex]:高性能、强可扩展的 Golang RPC 框架 +- [Netpoll][Netpoll]:高性能、I/O 非阻塞、专注于 RPC 场景的网络框架 +- [Thriftgo][Thriftgo]:Golang 实现的 thrift 编译器,支持插件机制和语义检查 +- Netpoll-http2:基于 [Netpoll][Netpoll] 的 HTTP/2 实现 除了这几个主要项目外,CloudWeGo 紧随其后陆续开源了 [**Kitex-benchmark**](https://github.com/cloudwego/kitex-benchmark)、[**Netpoll-benchmark**](https://github.com/cloudwego/netpoll-benchmark)、 [**Thrift-gen-validator**](https://github.com/cloudwego/thrift-gen-validator)、[**Kitex-examples**](https://github.com/cloudwego/kitex-examples) 、[**Netpoll-examples**](https://github.com/cloudwego/netpoll-examples)等项目。 
@@ -43,7 +44,7 @@ CloudWeGo 在第一阶段开源了四个项目: 鉴于文章篇幅有限,下文将重点介绍 CloudWeGo 核心项目 [Kitex][Kitex]。 从**演进历史**来看,2014 年,字节跳动技术团队引入 Golang 解决长连接推送业务面临的高并发问题,两年后,内部技术团队基于 Golang 推出了一个名为 Kite 的框架,同时对开源项目 Gin 做了一层很薄的封装,推出了 Ginex。 -这两个框架极大推动了 Golang 在公司内部的应用。此后,围绕性能和可扩展性设计,字节跳动重构 Kite,并在次年 10 月完成并发布Kitex,投入到内部应用中。据悉,截至 2021 年 9 月,线上有 3w+ 微服务使用 Kitex,大部分服务迁移新框架后可以收获 CPU 和延迟上的收益。 +这两个框架极大推动了 Golang 在公司内部的应用。此后,围绕性能和可扩展性设计,字节跳动重构 Kite,并在次年 10 月完成并发布 Kitex,投入到内部应用中。据悉,截至 2021 年 9 月,线上有 3w+ 微服务使用 Kitex,大部分服务迁移新框架后可以收获 CPU 和延迟上的收益。 ![image](/img/blog/Microservices_Open_CloudWeGo/Framework.PNG) @@ -55,13 +56,13 @@ CloudWeGo 在第一阶段开源了四个项目: ![image](/img/blog/Microservices_Open_CloudWeGo/Functions_Features.PNG) -* **高性能**:网络传输模块 [Kitex][Kitex] 默认集成了自研的网络库 [Netpoll][Netpoll],性能相较使用 go net 有显著优势;除了网络库带来的性能收益,[Kitex][Kitex] 对 Thrift 编解码也做了深度优化。关于性能数据可参考 [kitex-benchmark](https://github.com/cloudwego/kitex-benchmark)。 -* **扩展性**:[Kitex][Kitex] 设计上做了模块划分,提供了较多的扩展接口以及默认的扩展实现,使用者也可以根据需要自行定制扩展,更多扩展能力参见 CloudWeGo [官网文档](https://www.cloudwego.io/zh/docs/kitex/tutorials/framework-exten/)。[Kitex][Kitex] 也并未耦合 [Netpoll][Netpoll],开发者也可以选择其它网络库扩展使用。 -* **消息协议**:RPC 消息协议默认支持 Thrift、Kitex Protobuf、gRPC。Thrift 支持 Buffered 和 Framed 二进制协议;Kitex Protobuf 是 [Kitex][Kitex] 自定义的 Protobuf 消息协议,协议格式类似 Thrift;gRPC 是对 gRPC 消息协议的支持,可以与 gRPC 互通。除此之外,使用者也可以扩展自己的消息协议。 -* **传输协议**:传输协议封装消息协议进行 RPC 互通,传输协议可以额外透传元信息,用于服务治理,[Kitex][Kitex] 支持的传输协议有 TTHeader、HTTP2。TTHeader 可以和 Thrift、Kitex Protobuf 结合使用;HTTP2 目前主要是结合 gRPC 协议使用,后续也会支持 Thrift。 -* **多消息类型**:支持 PingPong、Oneway、双向 Streaming。其中 Oneway 目前只对 Thrift 协议支持,双向 Streaming 只对 gRPC 支持,后续会考虑支持 Thrift 的双向 Streaming。 -* **服务治理**:支持服务注册/发现、负载均衡、熔断、限流、重试、监控、链路跟踪、日志、诊断等服务治理模块,大部分均已提供默认扩展,使用者可选择集成。 -* **[Kitex][Kitex] 内置代码生成工具,可支持生成 Thrift、Protobuf 以及脚手架代码**。原生的 Thrift 代码由本次一起开源的 [Thriftgo][Thriftgo] 生成,[Kitex][Kitex] 对 Thrift 的优化由 Kitex Tool 作为插件支持。Protobuf 代码由 Kitex 作为官方 protoc 插件生成 ,目前暂未单独支持 Protobuf IDL 的解析和代码生成。 +- **高性能**:网络传输模块 [Kitex][Kitex] 默认集成了自研的网络库 
[Netpoll][Netpoll],性能相较使用 go net 有显著优势;除了网络库带来的性能收益,[Kitex][Kitex] 对 Thrift 编解码也做了深度优化。关于性能数据可参考 [kitex-benchmark](https://github.com/cloudwego/kitex-benchmark)。 +- **扩展性**:[Kitex][Kitex] 设计上做了模块划分,提供了较多的扩展接口以及默认的扩展实现,使用者也可以根据需要自行定制扩展,更多扩展能力参见 CloudWeGo [官网文档](https://www.cloudwego.io/zh/docs/kitex/tutorials/framework-exten/)。[Kitex][Kitex] 也并未耦合 [Netpoll][Netpoll],开发者也可以选择其它网络库扩展使用。 +- **消息协议**:RPC 消息协议默认支持 Thrift、Kitex Protobuf、gRPC。Thrift 支持 Buffered 和 Framed 二进制协议;Kitex Protobuf 是 [Kitex][Kitex] 自定义的 Protobuf 消息协议,协议格式类似 Thrift;gRPC 是对 gRPC 消息协议的支持,可以与 gRPC 互通。除此之外,使用者也可以扩展自己的消息协议。 +- **传输协议**:传输协议封装消息协议进行 RPC 互通,传输协议可以额外透传元信息,用于服务治理,[Kitex][Kitex] 支持的传输协议有 TTHeader、HTTP2。TTHeader 可以和 Thrift、Kitex Protobuf 结合使用;HTTP2 目前主要是结合 gRPC 协议使用,后续也会支持 Thrift。 +- **多消息类型**:支持 PingPong、Oneway、双向 Streaming。其中 Oneway 目前只对 Thrift 协议支持,双向 Streaming 只对 gRPC 支持,后续会考虑支持 Thrift 的双向 Streaming。 +- **服务治理**:支持服务注册/发现、负载均衡、熔断、限流、重试、监控、链路跟踪、日志、诊断等服务治理模块,大部分均已提供默认扩展,使用者可选择集成。 +- **[Kitex][Kitex] 内置代码生成工具,可支持生成 Thrift、Protobuf 以及脚手架代码**。原生的 Thrift 代码由本次一起开源的 [Thriftgo][Thriftgo] 生成,[Kitex][Kitex] 对 Thrift 的优化由 Kitex Tool 作为插件支持。Protobuf 代码由 Kitex 作为官方 protoc 插件生成 ,目前暂未单独支持 Protobuf IDL 的解析和代码生成。 简单总结一下,CloudWeGo 不仅仅是一个开源的项目,也是一个真实的、超大规模的**企业级**最佳实践。它源自企业,所以天生就适合在企业内部落地;它源自开源,最终也拥抱了开源,从 Go 基础库,到 Go 网络库和 Thrift 编译器,再到上层的服务框架,以及框架拥有的所有企业级治理能力,均对外开放开源。 @@ -103,11 +104,11 @@ CloudWeGo 在第一阶段开源了四个项目: [Kitex][Kitex] 大部分服务治理模块都是通过 Middleware 集成,熔断也是一样。[Kitex][Kitex] 提供了一套 CBSuite,封装了服务粒度的熔断器和实例粒度的熔断器。 -* **服务粒度熔断**:按照服务粒度进行熔断统计,通过 WithMiddleware 添加。服务粒度的具体划分取决于 Circuit Breaker Key,即熔断统计的 Key,初始化 CBSuite 时需要传入 **GenServiceCBKeyFunc**。 +- **服务粒度熔断**:按照服务粒度进行熔断统计,通过 WithMiddleware 添加。服务粒度的具体划分取决于 Circuit Breaker Key,即熔断统计的 Key,初始化 CBSuite 时需要传入 **GenServiceCBKeyFunc**。 默认提供的是 `circuitbreaker.RPCInfo2Key`,该 Key 的格式是 `fromServiceName/toServiceName/method`,即按照方法级别的异常做熔断统计。 -* **实例粒度熔断**:按照实例粒度进行熔断统计,主要用于解决单实例异常问题,如果触发了实例级别熔断,框架会自动重试。 +- **实例粒度熔断**:按照实例粒度进行熔断统计,主要用于解决单实例异常问题,如果触发了实例级别熔断,框架会自动重试。 
-**熔断器的思路很简单根据 RPC 成功或失败的情况,限制对下游的访问**。通常熔断器分为三个时期:CLOSED、OPEN、HALFOPEN。当RPC 正常时,为 CLOSED; +**熔断器的思路很简单根据 RPC 成功或失败的情况,限制对下游的访问**。通常熔断器分为三个时期:CLOSED、OPEN、HALFOPEN。当 RPC 正常时,为 CLOSED; 当 RPC 错误增多时,熔断器会被触发,进入 OPEN;OPEN 后经过一定的冷却时间,熔断器变为 HALFOPEN;HALFOPEN 时会对下游进行一些有策略的访问, 然后根据结果决定是变为 CLOSED,还是 OPEN。总的来说三个状态的转换大致如下图: @@ -135,8 +136,8 @@ CloudWeGo 在第一阶段开源了四个项目: [Kitex][Kitex] 提供三类重试:超时重试、Backup Request,建连失败重试。其中建连失败是网络层面问题,由于请求未发出,框架会默认重试,下面重点介绍前两类重试的使用。需要注意的是,因为很多的业务请求不具有**幂等性**,这两类重试不会作为默认策略,用户需要按需开启。 -* **超时重试**:错误重试的一种,即客户端收到超时错误的时候,发起重试请求。 -* **Backup Request**:客户端在一段时间内还没收到返回,发起重试请求,任一请求成功即算成功。Backup Request 的等待时间 `RetryDelay` 建议配置为 TP99,一般远小于配置的超时时间 `Timeout`。 +- **超时重试**:错误重试的一种,即客户端收到超时错误的时候,发起重试请求。 +- **Backup Request**:客户端在一段时间内还没收到返回,发起重试请求,任一请求成功即算成功。Backup Request 的等待时间 `RetryDelay` 建议配置为 TP99,一般远小于配置的超时时间 `Timeout`。 ![image](/img/blog/Microservices_Open_CloudWeGo/Timeout.png) @@ -150,14 +151,14 @@ CloudWeGo 在第一阶段开源了四个项目: [Kitex][Kitex] 默认提供了两种负载均衡算法实现: -* **WeightedRandom**:这个算法使用的是基于权重的随机策略,也是 [Kitex][Kitex] 的默认策略。它会依据实例的权重进行加权随机,并保证每个实例分配到的负载和自己的权重成比例。 -* **ConsistentHash**:一致性哈希主要适用于对上下文(如实例本地缓存)依赖程度高的场景,如希望同一个类型的请求打到同一台机器,则可使用该负载均衡方法。 +- **WeightedRandom**:这个算法使用的是基于权重的随机策略,也是 [Kitex][Kitex] 的默认策略。它会依据实例的权重进行加权随机,并保证每个实例分配到的负载和自己的权重成比例。 +- **ConsistentHash**:一致性哈希主要适用于对上下文(如实例本地缓存)依赖程度高的场景,如希望同一个类型的请求打到同一台机器,则可使用该负载均衡方法。 ConsistentHash 在使用时,需要注意如下事项: -* 下游节点发生变动时,一致性哈希结果可能会改变,某些 Key 可能会发生变化; -* 如果下游节点非常多,第一次冷启动时 Build 时间可能会较长,如果 RPC 超时短的话可能会导致超时; -* 如果第一次请求失败,并且 Replica 不为 0,那么会请求到 Replica 上;而第二次及以后仍然会请求第一个实例。 +- 下游节点发生变动时,一致性哈希结果可能会改变,某些 Key 可能会发生变化; +- 如果下游节点非常多,第一次冷启动时 Build 时间可能会较长,如果 RPC 超时短的话可能会导致超时; +- 如果第一次请求失败,并且 Replica 不为 0,那么会请求到 Replica 上;而第二次及以后仍然会请求第一个实例。 ### **可观测性** @@ -187,10 +188,10 @@ ConsistentHash 在使用时,需要注意如下事项: 此外,在大规模场景下,针对服务治理新功能的研发需求决策,我们往往还需要考虑以下因素: -* **性能:** 大部分业务很在意,也是团队一直努力的重点; -* **普遍性**:需要评估是不是所有业务都需要的能力; -* **简洁**: 通俗说,我们不太希望引入太多的线上问题或者太复杂的使用说明文档; -* **ROI**:功能迭代、产品升级需要考虑整体投资回报率。 +- **性能:** 大部分业务很在意,也是团队一直努力的重点; +- 
**普遍性**:需要评估是不是所有业务都需要的能力; +- **简洁**: 通俗说,我们不太希望引入太多的线上问题或者太复杂的使用说明文档; +- **ROI**:功能迭代、产品升级需要考虑整体投资回报率。 ## **04 CloudWeGo 的开源之路** @@ -239,7 +240,6 @@ CloudWeGo 在 2021 年底收录进入 CNCF Landscape,丰富了 CNCF 在 RPC 从功能研发计划来看,以 [Kitex][Kitex] 为例,将继续以内外部用户需求为驱动力,持续开发新的功能并迭代完善已有的功能。其中,包括支持连接预热、自定义异常重试、对 Protobuf 支持的性能优化,支持 xDS 协议等。 - 从开源生态来看,目前 [Kitex][Kitex] 已经完成了诸多开源项目的对接,未来也将会按需支持更多开源生态。 此外,CloudWeGo 也在和国内外主流公有云厂商进行合作对接,提供开箱即用、稳定可靠的微服务托管与治理产品的基座;CloudWeGo 也积极与国内外软件基金会开展合作和交流,探索新的合作模式。 diff --git a/content/zh/blog/news/Monoio_Open_Source/index.md b/content/zh/blog/news/Monoio_Open_Source/index.md index cab901802b..a3f65099e2 100644 --- a/content/zh/blog/news/Monoio_Open_Source/index.md +++ b/content/zh/blog/news/Monoio_Open_Source/index.md @@ -1,6 +1,7 @@ --- date: 2023-04-17 title: "字节开源 Monoio :基于 io-uring 的高性能 Rust Runtime" +projects: ["Monoio"] linkTitle: "字节开源 Monoio :基于 io-uring 的高性能 Rust Runtime" keywords: ["CloudWeGo", "Monoio", "io-uring", "开源", "Rust"] description: "本文介绍了 Rust 异步机制、Monoio 的设计概要、Runtime 对比选型和应用等。" @@ -8,13 +9,16 @@ author: CloudWeGo Rust Te --- ## 概述 + 尽管 Tokio 目前已经是 Rust 异步运行时的事实标准,但要实现极致性能的网络中间件还有一定距离。为了这个目标,[CloudWeGo][CloudWeGo] Rust Team 探索基于 io-uring 为 Rust 提供异步支持,并在此基础上研发通用网关。 本文包括以下内容: + 1. 介绍 Rust 异步机制; 2. [Monoio][Monoio] 的一些设计精要; 3. Runtime 对比选型与应用。 ## Rust 异步机制 + 借助 Rustc 和 llvm,Rust 可以生成足够高效且安全的机器码。但是一个应用程序除了计算逻辑以外往往还有 IO,特别是对于网络中间件,IO 其实是占了相当大比例的。 程序做 IO 需要和操作系统打交道,编写异步程序通常并不是一件简单的事情,在 Rust 中是怎么解决这两个问题的呢?比如,在 C++里面,可能经常会写一些 callback ,但是我们并不想在 Rust 里面这么做,这样的话会遇到很多生命周期相关的问题。 @@ -24,6 +28,7 @@ Rust 允许自行实现 Runtime 来调度任务和执行 syscall;并提供了 ![image](/img/blog/Monoio_Open_Source/1_2_zh.png) ### Example + 这里从一个简单的例子入手,看一看这套系统到底是怎么工作的。 当并行下载两个文件时,在任何语言中都可以启动两个 Thread,分别下载一个文件,然后等待 thread 执行结束;但并不想为了 IO 等待启动多余的线程,如果需要等待 IO,我们希望这时线程可以去干别的,等 IO 就绪了再做就好。 @@ -94,6 +99,7 @@ pub enum Poll { ``` Future 描述状态机对外暴露的接口: + 1. 推动状态机执行:Poll 方法顾名思义就是去推动状态机执行,给定一个任务,就会推动这个任务做状态转换。 2. 返回执行结果: 1. 
遇到了阻塞:Pending @@ -149,7 +155,7 @@ enum SumFuture { impl Future for SumFuture { type Output = i32; - + fn poll(self: Pin<&mut Self>, cx: &mut Context<' >) -> Poll { let this = self.get mut( ); loop { @@ -185,7 +191,7 @@ impl Future for SumFuture { 首先当我们创建 TCP stream 的时候,这个组件内部就会把它注册到一个 poller 上去,这个 poller 可以简单地认为是一个 epoll 的封装(具体使用什么 driver 是根据平台而异的)。 按照顺序来看,现在有一个 task ,要把这个 task spawn 出去执行。那么 spawn 本质上就是把 task 放到了 runtime 的任务队列里, -然后 runtime 内部会不停地从任务队列里面取出任务并且执行——执行就是推动状态机动一动,即调用它的 poll 方法,之后我们就来到了第2步。 +然后 runtime 内部会不停地从任务队列里面取出任务并且执行——执行就是推动状态机动一动,即调用它的 poll 方法,之后我们就来到了第 2 步。 ![image](/img/blog/Monoio_Open_Source/6.png) @@ -230,6 +236,7 @@ Future trait 里面除了有包含自身状态机的可变以借用以外,还 ![image](/img/blog/Monoio_Open_Source/7.png) 用户使用 listener.accept() 生成 AcceptFut 并等待: + 1. fut.await 内部使用 cx 调用 Future 的 poll 方法 2. poll 内部执行 syscall 3. 当前无连接拨入,kernel 返回 WOULD_BLOCK @@ -244,8 +251,9 @@ Future trait 里面除了有包含自身状态机的可变以借用以外,还 12. 12/13. kernel 返回 syscall 结果,poll 返回 Ready ### Runtime + 1. 先从 executor 看起,它有一个执行器和一个任务队列,它的工作是不停地取出任务,推动任务运行,之后在所有任务执行完毕必须等待时,把执行权交给 Reactor。 -2. Reactor 拿到了执行权之后,会与 kernel 打交道,等待 IO 就绪,IO就绪好了之后,我们需要标记这个 IO 的就绪状态,并且把这个 IO 所关联的任务给唤醒。唤醒之后,我们的执行权又会重新交回给 executor 。在 executor 执行这个任务的时候,就会调用到 IO 组件所提供的一些能力。 +2. Reactor 拿到了执行权之后,会与 kernel 打交道,等待 IO 就绪,IO 就绪好了之后,我们需要标记这个 IO 的就绪状态,并且把这个 IO 所关联的任务给唤醒。唤醒之后,我们的执行权又会重新交回给 executor 。在 executor 执行这个任务的时候,就会调用到 IO 组件所提供的一些能力。 3. IO 组件要能够提供这些异步的接口,比如说当用户想用 tcb stream 的时候,得用 runtime 提供的一个 TcpStream, 而不是直接用标准库的。第二,能够将自己的 fd 注册到 Reactor 上。第三,在 IO 没有就绪的时候,我们能把这个 waker 放到任务相关联的区域里。 整个 Rust 的异步机制大概就是这样。 @@ -253,14 +261,17 @@ Future trait 里面除了有包含自身状态机的可变以借用以外,还 ![image](/img/blog/Monoio_Open_Source/8.png) ## Monoio 设计 + 以下将会分为四个部分介绍 [Monoio][Monoio] Runtime 的设计要点: + 1. 基于 GAT(Generic associated types) 的异步 IO 接口; 2. 上层无感知的 Driver 探测与切换; 3. 如何兼顾性能与功能; 4. 
提供兼容 Tokio 的接口 ### 基于 GAT 的纯异步 IO 接口 -首先介绍一下两种通知机制。第一种是和 epoll 类似的,基于就绪状态的一种通知。第二种是 io-uring 的模式,它是一个基于“完成通知”的模式。 + +首先介绍一下两种通知机制。第一种是和 epoll 类似的,基于就绪状态的一种通知。第二种是 io-uring 的模式,它是一个基于“完成通知”的模式。 ![image](/img/blog/Monoio_Open_Source/9.png) @@ -287,6 +298,7 @@ io_uring 允许用户和内核共享两个无锁队列,submission queue 是用 针对这两个问题 [Monoio][Monoio] 支持了带取消能力的 IO trait,取消时会推入 CancelOp,用户需要在取消后继续等待原 Future 执行结束(由于它已经被取消了,所以会预期在较短的时间内返回),对应的 syscall 可能执行成功或失败,并返还 buffer。 ### 上层无感知的 Driver 探测和切换 + 第二个特性是支持上层无感知的 Driver 探测和切换。 ```rust @@ -301,23 +313,26 @@ trait OpAble { 2. 暴露统一的 IO 接口,即 AsyncReadRent 和 AsyncWriteRent 3. 内部利用 OpAble 统一组件实现(对 Read、Write 等 Op 做抽象) -具体来说,比如想做 accept、connect 或者 read、write 之类的,这些 op 是实现了 OpAble 的,实际对应这三个 fn : +具体来说,比如想做 accept、connect 或者 read、write 之类的,这些 op 是实现了 OpAble 的,实际对应这三个 fn : 1. uring_op:生成对应 uring SQE 2. legacy_interest:返回其关注的读写方向 -3. legacy_call:直接执行syscall +3. legacy_call:直接执行 syscall ![image](/img/blog/Monoio_Open_Source/11.png) 整个流程会将一个实现了 opable 的结构 submit 到的 driver 上,然后会返回一个实现了 future 的东西,之后它 poll 的时候和 drop 的时候具体地会分发到两个 driver 实现中的一个,就会用这三个函数里面的一个或者两个。 ### 性能 + 性能是 [Monoio][Monoio] 的出发点和最大的优点。除了 io_uring 带来的提升外,它设计上是一个 thread-per-core 模式的 Runtime。 + 1. 所有 Task 均仅在固定线程运行,无任务窃取。 2. Task Queue 为 thread local 结构操作无锁无竞争。 高性能主要源于两个方面: -1. Runtime内部高性能:基本等价于裸对接syscall + +1. Runtime 内部高性能:基本等价于裸对接 syscall 2. 
用户代码高性能:结构尽量 thread local 不跨线程 任务窃取和 thread-per-core 两种机制的对比: @@ -338,6 +353,7 @@ trait OpAble { 我们做了一些 benchmark,[Monoio][Monoio] 的性能水平扩展性是非常好的。当 CPU 核数增加的时候,只需要增加对应的线程就可以了。 ### 功能性 + Thread-per-core 不代表没有跨线程能力。用户依旧可以使用一些跨线程共享的结构,这些和 Runtime 无关;Runtime 提供了跨线程等待的能力。 任务在本线程执行,但可以等待其他线程上的任务,这个是一个很重要的能力。举例来说,用户需要用单线程去拉取远程配置,并下发到所有线程上。基于这个能力,用户就可以非常轻松地实现这个功能。 @@ -350,6 +366,7 @@ Thread-per-core 不代表没有跨线程能力。用户依旧可以使用一些 除了提供跨线程等待能力外,[Monoio][Monoio] 也提供了 spawn_blocking 能力,供用户执行较重的计算逻辑,以免影响到同线程的其他任务。 ### 兼容接口 + 由于目前很多组件(如 hyper 等)绑定了 tokio 的 IO trait,而前面讲了由于地层 driver 的原因这两种 IO trait 不可能统一,所以生态上会比较困难。对于一些非热路径的组件,需要允许用户以兼容方式使用,即便付出一些性能代价。 ![image](/img/blog/Monoio_Open_Source/14.png) @@ -366,6 +383,7 @@ let monoio_tcp = monoio::net::TcpStream::connect("1.1.1.1:80").await.unwrap(); 我们提供了一个 Wrapper,内置了一个 buffer,用户使用时需要多付出一次内存拷贝开销。通过这种方式,我们可以为 [monoio][Monoio] 的组件包装出 tokio 的兼容接口,使其可以使用兼容组件。 ## Runtime 对比&应用 + 这部分介绍 runtime 的一些对比选型和应用。 前面已经提到了关于均匀调度和 thread-per-core 的一些对比,这里主要说一下应用场景。对于较大量的轻任务,thread-per-core 模式是适合的。特别是代理、网关和文件 IO 密集的应用,使用 Monoio 就非常合适。 @@ -373,7 +391,7 @@ let monoio_tcp = monoio::net::TcpStream::connect("1.1.1.1:80").await.unwrap(); 还有一点,Tokio 致力于一个通用跨平台,但是 [monoio][Monoio] 设计之初就是为了极致性能,所以是期望以 io_uring 为主的。虽然也可以支持 epoll 和 kqueue,但仅作 fallback。 比如 kqueue 其实就是为了让用户能够在 Mac 上去开发的便利性,其实不期望用户真的把它跑在这(未来将支持 Windows)。 -生态部分,Tokio 的生态是比较全的,[monoio][Monoio] 的比较缺乏,即便有兼容层,兼容层本身是有开销的。Tokio 有任务窃取,可以在较多的场景表现很好,但其水平扩展性不佳。 +生态部分,Tokio 的生态是比较全的,[monoio][Monoio] 的比较缺乏,即便有兼容层,兼容层本身是有开销的。Tokio 有任务窃取,可以在较多的场景表现很好,但其水平扩展性不佳。 [monoio][Monoio] 的水平扩展就比较好,但是对这个业务场景和编程模型其实是有限制的。所以 [monoio][Monoio] 比较适合的一些场景就是代理、网关还有缓存数据聚合等。以及还有一些会做文件 io 的,因为 io_uring 对文件 io 非常好。 如果不用 io_uring 的话,在 Linux 下其实是没有真异步的文件 io 可以用的,只有用 io_uring 才能做到这一点。还适用于这种文件 io 比较密集的,比如说像 DB 类型的组件。 @@ -384,6 +402,7 @@ Tokio-uring 其实是一个构建在 tokio 之上的一层,有点像是一层 如果选择了 uring,那么编译产物就无法在旧版本 linux 上运行。而 [Monoio][Monoio] 很好的支持了这一点,支持动态探测 uring 的可用性。 ### Monoio 应用 + 1. 
Monoio Gateway: 基于 [Monoio][Monoio] 生态的网关服务,我们优化版本 Benchmark 下来性能优于 Nginx; 2. Volo: [CloudWeGo][CloudWeGo] Team 开源的 RPC 框架,目前在集成中,PoC 版本性能相比基于 Tokio 提升 26% diff --git a/content/zh/blog/news/Rust_or_Go/index.md b/content/zh/blog/news/Rust_or_Go/index.md index c570f9978f..66fb003dab 100644 --- a/content/zh/blog/news/Rust_or_Go/index.md +++ b/content/zh/blog/news/Rust_or_Go/index.md @@ -1,6 +1,7 @@ --- date: 2022-09-06 title: "选择 Go 还是 Rust?CloudWeGo-Volo 基于 Rust 语言的探索实践" +projects: ["Volo"] linkTitle: "选择 Go 还是 Rust?CloudWeGo-Volo 基于 Rust 语言的探索实践" keywords: ["Rust", "Go", "CloudWeGo", "Volo"] description: "本文介绍了 CloudWeGo-Volo 的起源、设计和实现,以及基于 Rust 语言的探索实践,包括 Go 的代价有哪些,Rust 的优势有哪些。" diff --git a/content/zh/blog/news/Shmipc_Open_Source/index.md b/content/zh/blog/news/Shmipc_Open_Source/index.md index 7ffb5ac384..0c4e6643d0 100644 --- a/content/zh/blog/news/Shmipc_Open_Source/index.md +++ b/content/zh/blog/news/Shmipc_Open_Source/index.md @@ -1,6 +1,7 @@ --- date: 2023-04-04 title: "字节跳动开源 Shmipc:基于共享内存的高性能 IPC" +projects: ["Shmipc"] linkTitle: "字节跳动开源 Shmipc:基于共享内存的高性能 IPC" keywords: ["CloudWeGo", "zero copy", "shared memory", "IPC"] description: "本文介绍了 CloudWeGo-Shmipc 项目的背景、设计思路和性能表现,以及在字节内部落地的踩坑记录,并分享了后续规划。" @@ -8,6 +9,7 @@ author: CloudWeGo TeamHchen, * CloudWeGo 从 2021 年 9 月 8 日正式开源。推出高性能的 RPC 框架 [Kitex][Kitex]、配合 [Kitex][Kitex] 使用的高性能网络库 [Netpoll][Netpoll]、基于 Thrift 代码生成工具 Thriftgo 和基础库 Sonic。 -> * 2022 年 5 月,开源了基于 JIT 的编解码工具 Frugal。[Kitex][Kitex] 配合 Frugal 的使用,能够带来 5 倍的性能提升。 -> * 2022 年 6 月,开源高性能 HTTP 框架 [Hertz][Hertz]。[Hertz][Hertz] 不仅仅是一个 高性能的HTTP 的开源框架,同时也是一个超大规模的企业落地实践。在我们内部的网关场景下,替换 [Hertz][Hertz] 框架之后的 CPU 使用节省了超过 40%。 -> * 2022 年 7 月,我们响应社区呼声最高的关于 Protobuf 的性能优化,带来了高性能的 Protobuf 序列化反序列化库 FastPB,再次对相关的性能进行提升。 -> * 开源一周年之际,我们又进行了更深度的高性能框架能力探索,开源了国内首个 Rust RPC 框架 [Volo][Volo]。 +> - CloudWeGo 从 2021 年 9 月 8 日正式开源。推出高性能的 RPC 框架 [Kitex][Kitex]、配合 [Kitex][Kitex] 使用的高性能网络库 [Netpoll][Netpoll]、基于 Thrift 代码生成工具 Thriftgo 和基础库 Sonic。 +> - 2022 年 5 月,开源了基于 JIT 的编解码工具 
Frugal。[Kitex][Kitex] 配合 Frugal 的使用,能够带来 5 倍的性能提升。 +> - 2022 年 6 月,开源高性能 HTTP 框架 [Hertz][Hertz]。[Hertz][Hertz] 不仅仅是一个 高性能的 HTTP 的开源框架,同时也是一个超大规模的企业落地实践。在我们内部的网关场景下,替换 [Hertz][Hertz] 框架之后的 CPU 使用节省了超过 40%。 +> - 2022 年 7 月,我们响应社区呼声最高的关于 Protobuf 的性能优化,带来了高性能的 Protobuf 序列化反序列化库 FastPB,再次对相关的性能进行提升。 +> - 开源一周年之际,我们又进行了更深度的高性能框架能力探索,开源了国内首个 Rust RPC 框架 [Volo][Volo]。 CloudWeGo 开源一周年的时间线,隐藏着 CloudWeGo 社区运营的第一个长期主义关键词:**高性能技术解决方案的持续探索** 。 @@ -72,7 +73,7 @@ CloudWeGo 开源社区一直保持着我们所有的开源项目内外一致的 我们非常希望 CloudWeGo 开源出来的高性能技术解决方案,能够更好地帮助更多用户搭建自己的微服务架构体系。因此,CloudWeGo 在社区建设上围绕着易用性建设做了非常多的拓展: -* **CloudWeGo 文档建设** +- **CloudWeGo 文档建设** 首先,在文档建设方面,CloudWeGo 官网上线了近 3 万字较为完善的文档体系。内容覆盖从 1 分钟快速上手,到各个相关模块的基本特性介绍,再到一些拓展能力的建设。 @@ -82,7 +83,7 @@ CloudWeGo 开源社区一直保持着我们所有的开源项目内外一致的 ![image](/img/blog/opensource_community/4.png) -* **CloudWeGo 生态建设** +- **CloudWeGo 生态建设** 想在内部构建一套完整的云原生微服务架构体系,仅仅使用 CloudWeGo 的一个框架项目,是远远不够的。因此,CloudWeGo 在易用性方面大力拓展相关的生态建设。 @@ -93,7 +94,7 @@ CloudWeGo 开源社区一直保持着我们所有的开源项目内外一致的 ![image](/img/blog/opensource_community/5.png) -* **CloudWeGo 的开发者活动** +- **CloudWeGo 的开发者活动** CloudWeGo 项目包括整个社区都对高性能有非常热烈的追求。因此,我们也在不停地迭代。 @@ -204,7 +205,7 @@ CloudWeGo 和森马共同梳理了与电商行业相关的一个整体使用场 ![image](/img/blog/opensource_community/17.png) -* **字节跳动** +- **字节跳动** 字节跳动是我们目前最大的用户。字节跳动的线上微服务数量已经超过了 10 万,服务端峰值 QPS 已经达到了数亿的级别,业务复杂性非常大,存在跨语言、跨平台、跨终端、跨集群、跨机房等多种复杂的问题。 @@ -212,14 +213,14 @@ CloudWeGo 和森马共同梳理了与电商行业相关的一个整体使用场 在这个场景之下,字节跳动最大的需求就是高性能和可扩展性,这也是 CloudWeGo 作为字节跳动内部孵化的一个优秀的高性能技术解决方案最初开源时所具有的特性。 -* **处于转型期的用户** +- **处于转型期的用户** 社区里数量最大的群体,这些用户可能是电商的、证券的、后台的以及一些创业公司,他们的节点数量不是特别多,可能在 5-1000 以内, 线上微服务数量处于 5000 以内的水平,但这些用户可能本身就是云原生架构,或者已经在往这方面做一些相关的迁移。 这类用户在 CloudWeGo 开源社区的诉求,主要是针对业务的特异性方面存在高性能相关的需求。 -* **非云原生架构企业用户** +- **非云原生架构企业用户** 这一类用户属于非云原生架构的企业,他们的服务可能还没有完全云化,具有一定的历史迁移负担。这类用户着重会优先考虑如何将自己的服务迁移上云。 @@ -229,19 +230,19 @@ CloudWeGo 和森马共同梳理了与电商行业相关的一个整体使用场 我们认为理想状态下用户整个云原生架构体系的搭建过程: -* **第一个阶段:服务上云** +- **第一个阶段:服务上云** 类似第三类用户,当前面临的问题就是怎么把自己的业务迁移上云。 -* **第二个阶段:云原生部署** +- **第二个阶段:云原生部署** 
类似第二类社区大量的用户,其实已经是云原生部署的企业,用到了相关容器化和编排调度的技术。 -* **第三个阶段:微服务架构** +- **第三个阶段:微服务架构** 继续往前演进,开始搭建相关的微服务架构,以及会做服务的拆分和通信的治理。 -* **第四个阶段:微服务治理** +- **第四个阶段:微服务治理** 当用户在线上有了一定数量的微服务之后,会开始出现依赖管理和一致性保障的问题。 @@ -259,15 +260,15 @@ CloudWeGo 1.0 社区搭建的主要方向,是将字节跳动内部孵化的高 ![image](/img/blog/opensource_community/19.png) -* **行业解决方案** +- **行业解决方案** 通过用户问题、场景和解决方案的行业共建,形成社区的 Go 云原生微服务最佳实践,希望能够针对有特异性需求的用户给到一定的参考。 -* **易用性建设** +- **易用性建设** 我们会持续和开源链条的上下游深入合作,建设云原生微服务相关的标准治理。致力于后续易用性的建设,希望能够给到成本更低的迁移,以及建立后期维护的治理标准。 -* **持续投资高性能方案** +- **持续投资高性能方案** 继续维持 CloudWeGo 开源社区的长期主义。我们会深入投入对高性能解决方案的持续探索,也会在 Rust 领域持续开展相关生态和开源的建设,共建 Rust 中国的开源生态。 @@ -277,9 +278,9 @@ CloudWeGo 1.0 社区搭建的主要方向,是将字节跳动内部孵化的高 CloudWeGo 2.0 的阶段,我们希望社区能够跨越项目边界,真正能够帮助社区用户搭建一套高性能的微服务治理架构和整体的微服务治理体系: -* 通过 Go 领域相关微服务治理的标准和最佳实践的建设,为一些通用性技术和行业最佳实践提供参考; -* 对接开源项目上下游进行深度合作,极大地提升整个项目的易用性; -* 推进高性能 Rust 解决方案的落地,持续探索 Rust 高性能技术解决方案,构建 Rust 相关生态。 +- 通过 Go 领域相关微服务治理的标准和最佳实践的建设,为一些通用性技术和行业最佳实践提供参考; +- 对接开源项目上下游进行深度合作,极大地提升整个项目的易用性; +- 推进高性能 Rust 解决方案的落地,持续探索 Rust 高性能技术解决方案,构建 Rust 相关生态。 如果大家对 CloudWeGo 开源社区,以及刚才提到的一些技术解决方案、企业的落地支持有任何的疑问,可以关注 CloudWeGo 公众号, 我们会在公众号上发布一些新闻动态以及各个相关场景的案例报道,同时我们也会在公众号上提供相关的技术支持。感谢大家的关注! diff --git a/content/zh/blog/news/open_source_volo/index.md b/content/zh/blog/news/open_source_volo/index.md index 550ffa51d0..aacd02170e 100644 --- a/content/zh/blog/news/open_source_volo/index.md +++ b/content/zh/blog/news/open_source_volo/index.md @@ -1,6 +1,7 @@ --- date: 2022-08-30 title: "国内首个基于 Rust 语言的 RPC 框架 — Volo 正式开源!" +projects: ["Volo"] linkTitle: "国内首个基于 Rust 语言的 RPC 框架 — Volo 正式开源!" 
keywords: ["rust", "rpc", "volo", "开源", "GAT"] description: "本文介绍了字节跳动正式开源 Rust RPC 框架 — Volo,并着重介绍了项目的起源,主要特性以及相关生态。" diff --git a/content/zh/blog/releases/Hertz/_index.md b/content/zh/blog/releases/Hertz/_index.md index cfca9040b6..5bc00f1b4e 100644 --- a/content/zh/blog/releases/Hertz/_index.md +++ b/content/zh/blog/releases/Hertz/_index.md @@ -1,5 +1,6 @@ --- title: "Hertz Release" linkTitle: "Hertz" +projects: ["Hertz"] weight: 2 --- diff --git a/content/zh/blog/releases/Hertz/release-v010.md b/content/zh/blog/releases/Hertz/release-v010.md index f6678dbda8..df52e75059 100644 --- a/content/zh/blog/releases/Hertz/release-v010.md +++ b/content/zh/blog/releases/Hertz/release-v010.md @@ -1,6 +1,7 @@ --- title: "Hertz v0.1.0 版本发布" linkTitle: "Release v0.1.0" +projects: ["Hertz"] date: 2022-06-20 description: > --- diff --git a/content/zh/blog/releases/Hertz/release-v020.md b/content/zh/blog/releases/Hertz/release-v020.md index 40d35d3b8c..eedd058627 100644 --- a/content/zh/blog/releases/Hertz/release-v020.md +++ b/content/zh/blog/releases/Hertz/release-v020.md @@ -1,6 +1,7 @@ --- title: "Hertz v0.2.0 版本发布" linkTitle: "Release v0.2.0" +projects: ["Hertz"] date: 2022-07-22 description: > --- diff --git a/content/zh/blog/releases/Hertz/release-v030.md b/content/zh/blog/releases/Hertz/release-v030.md index 876b11dcca..e021541d0e 100644 --- a/content/zh/blog/releases/Hertz/release-v030.md +++ b/content/zh/blog/releases/Hertz/release-v030.md @@ -1,6 +1,7 @@ --- title: "Hertz v0.3.0 版本发布" linkTitle: "Release v0.3.0" +projects: ["Hertz"] date: 2022-08-29 description: > --- diff --git a/content/zh/blog/releases/Hertz/release-v032.md b/content/zh/blog/releases/Hertz/release-v032.md index 35d48ccb23..77f6ee53f0 100644 --- a/content/zh/blog/releases/Hertz/release-v032.md +++ b/content/zh/blog/releases/Hertz/release-v032.md @@ -1,6 +1,7 @@ --- title: "Hertz v0.3.2 版本发布" linkTitle: "Release v0.3.2" +projects: ["Hertz"] date: 2022-09-20 description: > --- diff --git 
a/content/zh/blog/releases/Hertz/release-v040.md b/content/zh/blog/releases/Hertz/release-v040.md index 5bc3695859..dcd8602b64 100644 --- a/content/zh/blog/releases/Hertz/release-v040.md +++ b/content/zh/blog/releases/Hertz/release-v040.md @@ -1,6 +1,7 @@ --- title: "Hertz v0.4.0 版本发布" linkTitle: "Release v0.4.0" +projects: ["Hertz"] date: 2022-10-28 description: > --- diff --git a/content/zh/blog/releases/Hertz/release-v050.md b/content/zh/blog/releases/Hertz/release-v050.md index cd398e8788..408cd31225 100644 --- a/content/zh/blog/releases/Hertz/release-v050.md +++ b/content/zh/blog/releases/Hertz/release-v050.md @@ -1,6 +1,7 @@ --- title: "Hertz v0.5.0 版本发布" linkTitle: "Release v0.5.0" +projects: ["Hertz"] date: 2023-01-12 description: > --- diff --git a/content/zh/blog/releases/Hertz/release-v060.md b/content/zh/blog/releases/Hertz/release-v060.md index 9aedf38126..000d12e5f6 100644 --- a/content/zh/blog/releases/Hertz/release-v060.md +++ b/content/zh/blog/releases/Hertz/release-v060.md @@ -1,6 +1,7 @@ --- title: "Hertz v0.6.0 版本发布" linkTitle: "Release v0.6.0" +projects: ["Hertz"] date: 2023-03-02 description: > --- diff --git a/content/zh/blog/releases/Kitex/_index.md b/content/zh/blog/releases/Kitex/_index.md index 37162be203..8cad80ba7e 100644 --- a/content/zh/blog/releases/Kitex/_index.md +++ b/content/zh/blog/releases/Kitex/_index.md @@ -1,5 +1,6 @@ --- title: "Kitex Release" linkTitle: "Kitex" +projects: ["Kitex"] weight: 1 --- \ No newline at end of file diff --git a/content/zh/blog/releases/Kitex/release-v001.md b/content/zh/blog/releases/Kitex/release-v001.md index de689fe241..40b094a956 100755 --- a/content/zh/blog/releases/Kitex/release-v001.md +++ b/content/zh/blog/releases/Kitex/release-v001.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.0.1 版本发布" linkTitle: "Release v0.0.1" +projects: ["Kitex"] date: 2021-07-12 description: > diff --git a/content/zh/blog/releases/Kitex/release-v002.md b/content/zh/blog/releases/Kitex/release-v002.md index 
3a2cfc6f10..da991a641c 100644 --- a/content/zh/blog/releases/Kitex/release-v002.md +++ b/content/zh/blog/releases/Kitex/release-v002.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.0.2 版本发布" linkTitle: "Release v0.0.2" +projects: ["Kitex"] date: 2021-07-30 description: > diff --git a/content/zh/blog/releases/Kitex/release-v003.md b/content/zh/blog/releases/Kitex/release-v003.md index 492e73bfac..61d9eefa4c 100644 --- a/content/zh/blog/releases/Kitex/release-v003.md +++ b/content/zh/blog/releases/Kitex/release-v003.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.0.3 版本发布" linkTitle: "Release v0.0.3" +projects: ["Kitex"] date: 2021-08-01 description: > diff --git a/content/zh/blog/releases/Kitex/release-v004.md b/content/zh/blog/releases/Kitex/release-v004.md index 5ec20200ef..3403f81fed 100644 --- a/content/zh/blog/releases/Kitex/release-v004.md +++ b/content/zh/blog/releases/Kitex/release-v004.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.0.4 版本发布" linkTitle: "Release v0.0.4" +projects: ["Kitex"] date: 2021-08-26 description: > --- diff --git a/content/zh/blog/releases/Kitex/release-v005.md b/content/zh/blog/releases/Kitex/release-v005.md index 336ffb47ae..a4cf147f9a 100644 --- a/content/zh/blog/releases/Kitex/release-v005.md +++ b/content/zh/blog/releases/Kitex/release-v005.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.0.5 版本发布" linkTitle: "Release v0.0.5" +projects: ["Kitex"] date: 2021-09-26 description: > diff --git a/content/zh/blog/releases/Kitex/release-v008.md b/content/zh/blog/releases/Kitex/release-v008.md index c5a9ed9d18..8b29c74897 100644 --- a/content/zh/blog/releases/Kitex/release-v008.md +++ b/content/zh/blog/releases/Kitex/release-v008.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.0.8 版本发布" linkTitle: "Release v0.0.8" +projects: ["Kitex"] date: 2021-11-05 description: > --- diff --git a/content/zh/blog/releases/Kitex/release-v010.md b/content/zh/blog/releases/Kitex/release-v010.md index 9477a298f6..cb4562d7ba 100644 --- a/content/zh/blog/releases/Kitex/release-v010.md +++ 
b/content/zh/blog/releases/Kitex/release-v010.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.1.0 版本发布" linkTitle: "Release v0.1.0" +projects: ["Kitex"] date: 2021-12-13 description: > diff --git a/content/zh/blog/releases/Kitex/release-v012.md b/content/zh/blog/releases/Kitex/release-v012.md index f9060b756f..7d480226c5 100644 --- a/content/zh/blog/releases/Kitex/release-v012.md +++ b/content/zh/blog/releases/Kitex/release-v012.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.1.2 版本发布" linkTitle: "Release v0.1.2" +projects: ["Kitex"] date: 2021-12-22 description: > diff --git a/content/zh/blog/releases/Kitex/release-v013.md b/content/zh/blog/releases/Kitex/release-v013.md index e6ca022421..7b8187fc9f 100644 --- a/content/zh/blog/releases/Kitex/release-v013.md +++ b/content/zh/blog/releases/Kitex/release-v013.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.1.3 版本发布" linkTitle: "Release v0.1.3" +projects: ["Kitex"] date: 2021-12-30 description: > diff --git a/content/zh/blog/releases/Kitex/release-v014.md b/content/zh/blog/releases/Kitex/release-v014.md index cc7051f0e7..d25e6e5fe3 100644 --- a/content/zh/blog/releases/Kitex/release-v014.md +++ b/content/zh/blog/releases/Kitex/release-v014.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.1.4 版本发布" linkTitle: "Release v0.1.4" +projects: ["Kitex"] date: 2022-01-18 description: > diff --git a/content/zh/blog/releases/Kitex/release-v020.md b/content/zh/blog/releases/Kitex/release-v020.md index 45984b3d12..bb2eda679c 100644 --- a/content/zh/blog/releases/Kitex/release-v020.md +++ b/content/zh/blog/releases/Kitex/release-v020.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.2.0 版本发布" linkTitle: "Release v0.2.0" +projects: ["Kitex"] date: 2022-02-24 description: > diff --git a/content/zh/blog/releases/Kitex/release-v021.md b/content/zh/blog/releases/Kitex/release-v021.md index adcd5d95ae..09bf33a101 100644 --- a/content/zh/blog/releases/Kitex/release-v021.md +++ b/content/zh/blog/releases/Kitex/release-v021.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.2.1 版本发布" 
linkTitle: "Release v0.2.1" +projects: ["Kitex"] date: 2022-03-24 description: > diff --git a/content/zh/blog/releases/Kitex/release-v030.md b/content/zh/blog/releases/Kitex/release-v030.md index 635e5703b1..648c381bef 100644 --- a/content/zh/blog/releases/Kitex/release-v030.md +++ b/content/zh/blog/releases/Kitex/release-v030.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.3.0 版本发布" linkTitle: "Release v0.3.0" +projects: ["Kitex"] date: 2022-04-29 description: > diff --git a/content/zh/blog/releases/Kitex/release-v032.md b/content/zh/blog/releases/Kitex/release-v032.md index 0254adccf6..8c7816c209 100644 --- a/content/zh/blog/releases/Kitex/release-v032.md +++ b/content/zh/blog/releases/Kitex/release-v032.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.3.2 版本发布" linkTitle: "Release v0.3.2" +projects: ["Kitex"] date: 2022-06-02 description: > diff --git a/content/zh/blog/releases/Kitex/release-v040.md b/content/zh/blog/releases/Kitex/release-v040.md index 8e110135d8..64470243d5 100644 --- a/content/zh/blog/releases/Kitex/release-v040.md +++ b/content/zh/blog/releases/Kitex/release-v040.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.4.0 版本发布" linkTitle: "Release v0.4.0" +projects: ["Kitex"] date: 2022-08-26 description: > --- diff --git a/content/zh/blog/releases/Kitex/release-v043.md b/content/zh/blog/releases/Kitex/release-v043.md index 0361c641f7..7aeb6945ad 100644 --- a/content/zh/blog/releases/Kitex/release-v043.md +++ b/content/zh/blog/releases/Kitex/release-v043.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.4.3 版本发布" linkTitle: "Release v0.4.3" +projects: ["Kitex"] date: 2022-11-02 description: > --- diff --git a/content/zh/blog/releases/Kitex/release-v050.md b/content/zh/blog/releases/Kitex/release-v050.md index bbfd32509e..2b5c253f6a 100644 --- a/content/zh/blog/releases/Kitex/release-v050.md +++ b/content/zh/blog/releases/Kitex/release-v050.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.5.0 版本发布" linkTitle: "Release v0.5.0" +projects: ["Kitex"] date: 2023-03-08 description: > --- diff --git 
a/content/zh/blog/releases/Kitex/release-v052.md b/content/zh/blog/releases/Kitex/release-v052.md index a596013b42..92743354da 100644 --- a/content/zh/blog/releases/Kitex/release-v052.md +++ b/content/zh/blog/releases/Kitex/release-v052.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.5.3 版本发布" linkTitle: "Release v0.5.3" +projects: ["Kitex"] date: 2023-04-21 description: > --- diff --git a/content/zh/blog/releases/Kitex/release-v060.md b/content/zh/blog/releases/Kitex/release-v060.md index a554e3e783..72bf3067cd 100644 --- a/content/zh/blog/releases/Kitex/release-v060.md +++ b/content/zh/blog/releases/Kitex/release-v060.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.6.0 版本发布" linkTitle: "Release v0.6.0" +projects: ["Kitex"] date: 2023-06-14 description: > --- diff --git a/content/zh/blog/releases/Kitex/release-v061.md b/content/zh/blog/releases/Kitex/release-v061.md index 98d2128b5d..2f06f2976a 100644 --- a/content/zh/blog/releases/Kitex/release-v061.md +++ b/content/zh/blog/releases/Kitex/release-v061.md @@ -1,6 +1,7 @@ --- title: "Kitex v0.6.1 版本发布" linkTitle: "Release v0.6.1" +projects: ["Kitex"] date: 2023-06-19 description: > --- diff --git a/content/zh/blog/releases/Netpoll/_index.md b/content/zh/blog/releases/Netpoll/_index.md index 07f7480494..116b2bf284 100644 --- a/content/zh/blog/releases/Netpoll/_index.md +++ b/content/zh/blog/releases/Netpoll/_index.md @@ -1,5 +1,6 @@ --- title: "Netpoll Release" linkTitle: "Netpoll" +projects: ["Netpoll"] weight: 3 --- diff --git a/content/zh/blog/releases/Netpoll/release-v004.md b/content/zh/blog/releases/Netpoll/release-v004.md index fd1c76976e..12db4f1529 100644 --- a/content/zh/blog/releases/Netpoll/release-v004.md +++ b/content/zh/blog/releases/Netpoll/release-v004.md @@ -1,9 +1,9 @@ --- title: "Netpoll v0.0.4 版本发布" linkTitle: "Release v0.0.4" +projects: ["Netpoll"] date: 2021-09-16 description: > - --- ## 优化: @@ -13,11 +13,10 @@ description: > - 返回 nocopy rw 的真实错误 - 更改了循环策略的默认数量 - 重新定义了 EventLoop.Serve arg: Listener -> 
net.Listener -- 在 DisableGopool 中增加了API +- 在 DisableGopool 中增加了 API - 删除了读锁 - 连接 Flush API 调整为阻塞的 ## Bug 修复: - 设置剩余待读取大小。 - diff --git a/content/zh/blog/releases/Netpoll/release-v010.md b/content/zh/blog/releases/Netpoll/release-v010.md index 98757f7438..9fae6c6db5 100644 --- a/content/zh/blog/releases/Netpoll/release-v010.md +++ b/content/zh/blog/releases/Netpoll/release-v010.md @@ -1,9 +1,9 @@ --- title: "Netpoll v0.1.0 版本发布" linkTitle: "Release v0.1.0" +projects: ["Netpoll"] date: 2021-12-01 description: > - --- ## 功能: @@ -14,17 +14,15 @@ description: > - 优化方案:减少了 waitRead 和 inputAck 冲突时产生的超时错误 - 逻辑简化:简化了连接状态机 - ## Bug 修复: - 修复了 eventLoop 提前 GC 的 bug ## 文档 -- 更新 README,将 Performance 部分移动至 netpoll-benchmark 项目 +- 更新 README,将 Performance 部分移动至 netpoll-benchmark 项目 - 更新了 reference,添加了官网信息,移除了 change log ## 重大变更 - WriteBuffer 返回值由 (n int, err error) 改为 (err error) - diff --git a/content/zh/blog/releases/Netpoll/release-v011.md b/content/zh/blog/releases/Netpoll/release-v011.md index f4803195a9..d644f484a6 100644 --- a/content/zh/blog/releases/Netpoll/release-v011.md +++ b/content/zh/blog/releases/Netpoll/release-v011.md @@ -1,16 +1,15 @@ --- title: "Netpoll v0.1.1 版本发布" linkTitle: "Release v0.1.1" +projects: ["Netpoll"] date: 2021-12-09 description: > - --- ## 优化: - 优化了多路复用下,分片队列的性能 - ## Bug 修复: - 修复了 book 方法在多路复用下的 bug @@ -18,4 +17,3 @@ description: > ## 文档 - 修正了一些大小写和语法问题,并更新了链接 - diff --git a/content/zh/blog/releases/Netpoll/release-v012.md b/content/zh/blog/releases/Netpoll/release-v012.md index 4e3e6e1b8d..7a143c55fc 100644 --- a/content/zh/blog/releases/Netpoll/release-v012.md +++ b/content/zh/blog/releases/Netpoll/release-v012.md @@ -1,14 +1,11 @@ --- title: "Netpoll v0.1.2 版本发布" linkTitle: "Release v0.1.2" +projects: ["Netpoll"] date: 2021-12-13 description: > - --- ## Bug 修复: - LinkBuffer 增加了空值校验 - - - diff --git a/content/zh/blog/releases/Netpoll/release-v020.md b/content/zh/blog/releases/Netpoll/release-v020.md index 28b5fe3988..caa6b71c8b 100644 --- 
a/content/zh/blog/releases/Netpoll/release-v020.md +++ b/content/zh/blog/releases/Netpoll/release-v020.md @@ -1,25 +1,25 @@ --- title: "Netpoll v0.2.0 版本发布" linkTitle: "Release v0.2.0" +projects: ["Netpoll"] date: 2022-02-22 description: > - --- ## Improvement -* Feat: 添加 OnConnect 回调 -* Feat: 新增 Until API -* Feat: 支持不带 timeout 的 dial +- Feat: 添加 OnConnect 回调 +- Feat: 新增 Until API +- Feat: 支持不带 timeout 的 dial ## Fix -* Fix: 修复当只设置了 onConnect 回调时,不会触发 close callback 的 bug -* Fix: 添加最大节点限制,避免异常情况下的 OOM 问题 -* Fix: 修复 reset operator 时,没有 reset OnWrite 的问题 -* Fix: 修复连接关闭时,写 panic 的问题 -* Fix: 修复单测失败问题 +- Fix: 修复当只设置了 onConnect 回调时,不会触发 close callback 的 bug +- Fix: 添加最大节点限制,避免异常情况下的 OOM 问题 +- Fix: 修复 reset operator 时,没有 reset OnWrite 的问题 +- Fix: 修复连接关闭时,写 panic 的问题 +- Fix: 修复单测失败问题 ## Chore -* docs: 更新 readme +- docs: 更新 readme diff --git a/content/zh/blog/releases/Netpoll/release-v022.md b/content/zh/blog/releases/Netpoll/release-v022.md index 378a004211..1c3c1d5632 100644 --- a/content/zh/blog/releases/Netpoll/release-v022.md +++ b/content/zh/blog/releases/Netpoll/release-v022.md @@ -1,30 +1,29 @@ --- title: "Netpoll v0.2.2 版本发布" linkTitle: "Release v0.2.2" +projects: ["Netpoll"] date: 2022-04-28 description: > - --- ## Improvement -* Fix: Loops 缩容不再全部重置 -* Chore: mcache bsr 计算使用 math/bits.Len 代替,以提升性能。 -* Feat: 修复 LinkBuffer Close 时没有回收 caches 的问题(不是内存泄漏) +- Fix: Loops 缩容不再全部重置 +- Chore: mcache bsr 计算使用 math/bits.Len 代替,以提升性能。 +- Feat: 修复 LinkBuffer Close 时没有回收 caches 的问题(不是内存泄漏) ## Fix -* Fix: 修复短链接 send&close 场景无法触发 OnRequest 回调的问题 -* Fix: 修复 zcReader 读到 io.EOF 后丢失部分数据的问题 -* Fix: 修复 flush 没有检查连接关闭的问题 +- Fix: 修复短链接 send&close 场景无法触发 OnRequest 回调的问题 +- Fix: 修复 zcReader 读到 io.EOF 后丢失部分数据的问题 +- Fix: 修复 flush 没有检查连接关闭的问题 ## Doc -* Doc: 更新了用户文档 -* Doc: 增加了 Reader.Slice 的定义描述 -* Doc: 修复了 examples 中的死链 +- Doc: 更新了用户文档 +- Doc: 增加了 Reader.Slice 的定义描述 +- Doc: 修复了 examples 中的死链 ## Revert -* Revert: 重置了 loops 初始化数量 - +- Revert: 重置了 loops 初始化数量 diff --git 
a/content/zh/blog/releases/Netpoll/release-v030.md b/content/zh/blog/releases/Netpoll/release-v030.md index dc0cea5572..d9f84c625c 100644 --- a/content/zh/blog/releases/Netpoll/release-v030.md +++ b/content/zh/blog/releases/Netpoll/release-v030.md @@ -1,19 +1,19 @@ --- title: "Netpoll v0.3.0 版本发布" linkTitle: "Release v0.3.0" +projects: ["Netpoll"] date: 2022-11-09 description: > --- ## Feat -* [[#206](https://github.com/cloudwego/netpoll/pull/206)] feat: 连接 Flush 接口支持写超时设置。 -* [[#182](https://github.com/cloudwego/netpoll/pull/182)] feat: 支持在 ipv6 only 环境下创建连接。 +- [[#206](https://github.com/cloudwego/netpoll/pull/206)] feat: 连接 Flush 接口支持写超时设置。 +- [[#182](https://github.com/cloudwego/netpoll/pull/182)] feat: 支持在 ipv6 only 环境下创建连接。 ## Fix -* [[#200](https://github.com/cloudwego/netpoll/pull/200)] fix: 修复 #166 中的代码错误:close fd 没有正确的被 detach。 -* [[#196](https://github.com/cloudwego/netpoll/pull/196)] fix: 系统 io 调用使用 int32 存储 size, 超限调用会导致 EINVAL。 -* [[#179](https://github.com/cloudwego/netpoll/pull/179)] fix: 修复 buffer 长度 int32 溢出的问题。 -* [[#183](https://github.com/cloudwego/netpoll/pull/183)] fix: 当 EPOLLERR 发生时,跳过检查 EPOLLOUT。 - +- [[#200](https://github.com/cloudwego/netpoll/pull/200)] fix: 修复 #166 中的代码错误:close fd 没有正确的被 detach。 +- [[#196](https://github.com/cloudwego/netpoll/pull/196)] fix: 系统 io 调用使用 int32 存储 size, 超限调用会导致 EINVAL。 +- [[#179](https://github.com/cloudwego/netpoll/pull/179)] fix: 修复 buffer 长度 int32 溢出的问题。 +- [[#183](https://github.com/cloudwego/netpoll/pull/183)] fix: 当 EPOLLERR 发生时,跳过检查 EPOLLOUT。 diff --git a/content/zh/blog/releases/Netpoll/release-v040.md b/content/zh/blog/releases/Netpoll/release-v040.md index 5eb458fe11..9c0ae025bc 100644 --- a/content/zh/blog/releases/Netpoll/release-v040.md +++ b/content/zh/blog/releases/Netpoll/release-v040.md @@ -1,21 +1,22 @@ --- title: "Netpoll v0.4.0 版本发布" linkTitle: "Release v0.4.0" +projects: ["Netpoll"] date: 2023-06-14 description: > --- ## Feature: -- 
[[#249](https://github.com/cloudwego/netpoll/pull/249)] feat: 添加Detach函数来支持从连接的poller中删除连接 +- [[#249](https://github.com/cloudwego/netpoll/pull/249)] feat: 添加 Detach 函数来支持从连接的 poller 中删除连接 ## Optimize: -- [[#250](https://github.com/cloudwego/netpoll/pull/250)] optimize: 优化WriteDirect实现,避免remainLen为0时panic和重复创建冗余的LinkBufferNode. +- [[#250](https://github.com/cloudwego/netpoll/pull/250)] optimize: 优化 WriteDirect 实现,避免 remainLen 为 0 时 panic 和重复创建冗余的 LinkBufferNode. ## Bugfix: -- [[#256](https://github.com/cloudwego/netpoll/pull/256)] fix: 调用 openPoll 失败时关闭已经创建的 poll +- [[#256](https://github.com/cloudwego/netpoll/pull/256)] fix: 调用 openPoll 失败时关闭已经创建的 poll - [[#251](https://github.com/cloudwego/netpoll/pull/251)] fix: err to e0 - [[#226](https://github.com/cloudwego/netpoll/pull/226)] fix: 在关闭连接前 poller 读取所有未读的 data - [[#237](https://github.com/cloudwego/netpoll/pull/237)] fix: shard queue 状态关闭错误 diff --git a/content/zh/blog/releases/Volo/_index.md b/content/zh/blog/releases/Volo/_index.md index 9fab39d7c4..f63f18d104 100644 --- a/content/zh/blog/releases/Volo/_index.md +++ b/content/zh/blog/releases/Volo/_index.md @@ -1,6 +1,7 @@ --- title: "Volo Release" linkTitle: "Volo" +projects: ["Volo"] weight: 4 --- diff --git a/content/zh/blog/releases/Volo/release-v020.md b/content/zh/blog/releases/Volo/release-v020.md index 287185cc82..77ee372991 100644 --- a/content/zh/blog/releases/Volo/release-v020.md +++ b/content/zh/blog/releases/Volo/release-v020.md @@ -1,6 +1,7 @@ --- title: "Volo v0.2.0 版本发布" linkTitle: "Release v0.2.0" +projects: ["Volo"] date: 2022-10-18 description: > --- diff --git a/content/zh/blog/releases/Volo/release-v021.md b/content/zh/blog/releases/Volo/release-v021.md index be498407c1..2672fdb166 100644 --- a/content/zh/blog/releases/Volo/release-v021.md +++ b/content/zh/blog/releases/Volo/release-v021.md @@ -1,6 +1,7 @@ --- title: "Volo v0.2.1 版本发布" linkTitle: "Release v0.2.1" +projects: ["Volo"] date: 2022-10-26 description: > --- diff --git 
a/content/zh/blog/releases/Volo/release-v030.md b/content/zh/blog/releases/Volo/release-v030.md index 92e81c39b8..44f824b0f7 100644 --- a/content/zh/blog/releases/Volo/release-v030.md +++ b/content/zh/blog/releases/Volo/release-v030.md @@ -1,6 +1,7 @@ --- title: "Volo 0.3.0 版本发布" linkTitle: "Release v0.3.0" +projects: ["Volo"] date: 2022-12-22 description: > --- diff --git a/content/zh/blog/releases/Volo/release-v032.md b/content/zh/blog/releases/Volo/release-v032.md index 8766b30af8..976d7aeabd 100644 --- a/content/zh/blog/releases/Volo/release-v032.md +++ b/content/zh/blog/releases/Volo/release-v032.md @@ -1,6 +1,7 @@ --- title: "Volo 0.3.2 版本发布" linkTitle: "Release v0.3.2" +projects: ["Volo"] date: 2023-02-07 description: > --- diff --git a/content/zh/blog/releases/Volo/release-v041.md b/content/zh/blog/releases/Volo/release-v041.md index d254ba6f67..2362b44605 100644 --- a/content/zh/blog/releases/Volo/release-v041.md +++ b/content/zh/blog/releases/Volo/release-v041.md @@ -1,6 +1,7 @@ --- title: 'Volo 0.4.1 版本发布' linkTitle: 'Release v0.4.1' +projects: ["Volo"] date: 2023-03-20 description: > --- diff --git a/content/zh/blog/releases/Volo/release-v050.md b/content/zh/blog/releases/Volo/release-v050.md index 4e767d1603..d4c0586533 100644 --- a/content/zh/blog/releases/Volo/release-v050.md +++ b/content/zh/blog/releases/Volo/release-v050.md @@ -1,6 +1,7 @@ --- title: 'Volo 0.5.0 版本发布' linkTitle: 'Release v0.5.0' +projects: ["Volo"] date: 2023-06-02 description: > --- diff --git a/content/zh/blog/releases/_index.md b/content/zh/blog/releases/_index.md index 0434d97f2a..ceec0de5fa 100644 --- a/content/zh/blog/releases/_index.md +++ b/content/zh/blog/releases/_index.md @@ -1,6 +1,7 @@ --- title: "发布" +projects: [] linkTitle: "发布" weight: 2 --- diff --git a/content/zh/docs/hertz/tutorials/basic-feature/protocol/http3.md b/content/zh/docs/hertz/tutorials/basic-feature/protocol/http3.md index 7f7426ab19..88969a0159 100644 --- 
a/content/zh/docs/hertz/tutorials/basic-feature/protocol/http3.md +++ b/content/zh/docs/hertz/tutorials/basic-feature/protocol/http3.md @@ -42,10 +42,10 @@ h.AddProtocol(suite.HTTP3, factory.NewServerFactory(&http3.Option{})) | 配置 | 说明 | | :----------------- | -------------------------------------------- | | WithTransport | 设置 HTTP3 实现的网络库 `quic.NewTransporter` | -| WithAltTransport | 设置备用网络库 `netpoll` 或 `go net`,适用于同时在 TCP 和 QUIC 监听的场景 | -| WithALPN | 设置是否启用 ALPN | +| WithAltTransport | 设置备用网络库 `netpoll` 或 `go net`,适用于同时在 TCP 和 QUIC 监听的场景 | +| WithALPN | 设置是否启用 ALPN | | WithTLS | 设置 TLS 配置 | -| WithHostPorts | 设置开启服务的域名和端口号 | +| WithHostPorts | 设置开启服务的域名和端口号 | ## 示例代码 @@ -236,5 +236,3 @@ func main() { wg.Wait() } ``` - - diff --git a/i18n/en.toml b/i18n/en.toml index 184ba2bc2a..240ac47caf 100644 --- a/i18n/en.toml +++ b/i18n/en.toml @@ -75,3 +75,8 @@ other = "Please tell us how we can improve" other = "Were you looking for:" [error_page_title] other = "404, Page not found" + +[taxo.projects] +title = 'Our projects' +[taxo.page.header] +projects = 'Projects' diff --git a/i18n/zh.toml b/i18n/zh.toml index 2d102086d5..a753e8dd77 100644 --- a/i18n/zh.toml +++ b/i18n/zh.toml @@ -69,3 +69,8 @@ other = "请告诉我们如何改进" other = "也许你在寻找这些:" [error_page_title] other = "404, 访问的页面并不存在" + +[taxo.projects] +title = '项目列表' +[taxo.page.header] +projects = '项目' diff --git a/layouts/community/baseof.html b/layouts/community/baseof.html index 20af4e6526..97505ec3fb 100644 --- a/layouts/community/baseof.html +++ b/layouts/community/baseof.html @@ -16,7 +16,7 @@
{{ partial "version-banner.html" . }} @@ -29,4 +29,4 @@ {{ partial "scripts.html" . }} - \ No newline at end of file + diff --git a/layouts/docs/baseof.html b/layouts/docs/baseof.html index 20af4e6526..97505ec3fb 100644 --- a/layouts/docs/baseof.html +++ b/layouts/docs/baseof.html @@ -16,7 +16,7 @@
{{ partial "version-banner.html" . }} @@ -29,4 +29,4 @@ {{ partial "scripts.html" . }} - \ No newline at end of file + diff --git a/layouts/partials/taxonomy_terms_article.html b/layouts/partials/taxonomy_terms_article.html index 97525eb826..7c3bcdc844 100644 --- a/layouts/partials/taxonomy_terms_article.html +++ b/layouts/partials/taxonomy_terms_article.html @@ -2,11 +2,11 @@ {{ $taxo := .taxo }} {{ if (gt (len ($context.GetTerms $taxo)) 0)}}
-
{{ humanize $taxo }}:
+
{{ i18n (printf "taxo.page.header.%s" $taxo)}}:
-{{ end }} \ No newline at end of file +{{ end }} diff --git a/layouts/partials/taxonomy_terms_cloud.html b/layouts/partials/taxonomy_terms_cloud.html index 45b57e3a48..d4fe072633 100644 --- a/layouts/partials/taxonomy_terms_cloud.html +++ b/layouts/partials/taxonomy_terms_cloud.html @@ -6,13 +6,13 @@ {{ if (gt (len $taxonomy) 0)}}
{{ with $title }} -
{{ . }}
+
{{ i18n . }}
{{ end }}
{{ end }} -{{ end }} \ No newline at end of file +{{ end }} diff --git a/layouts/security/baseof.html b/layouts/security/baseof.html index 20af4e6526..97505ec3fb 100644 --- a/layouts/security/baseof.html +++ b/layouts/security/baseof.html @@ -16,7 +16,7 @@
{{ partial "version-banner.html" . }} @@ -29,4 +29,4 @@ {{ partial "scripts.html" . }} - \ No newline at end of file + diff --git a/layouts/swagger/baseof.html b/layouts/swagger/baseof.html index bf7baadd10..704cccacc4 100644 --- a/layouts/swagger/baseof.html +++ b/layouts/swagger/baseof.html @@ -18,7 +18,7 @@
{{ if not .Site.Params.ui.breadcrumb_disable }}{{ partial "breadcrumb.html" . }}{{ end }}