From 88aa810c44e2d3180922ec6b8a070406e4212ca0 Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Mon, 1 Sep 2025 10:58:55 +0200 Subject: [PATCH] Update _clickpipes_faq.md --- docs/cloud/_snippets/_clickpipes_faq.md | 145 ++++++++++++++++++++++++ 1 file changed, 145 insertions(+) diff --git a/docs/cloud/_snippets/_clickpipes_faq.md b/docs/cloud/_snippets/_clickpipes_faq.md index e69de29bb2d..00485d0f91f 100644 --- a/docs/cloud/_snippets/_clickpipes_faq.md +++ b/docs/cloud/_snippets/_clickpipes_faq.md @@ -0,0 +1,145 @@ +import Image from '@theme/IdealImage'; +import clickpipesPricingFaq1 from '@site/static/images/cloud/manage/jan2025_faq/external_clickpipes_pricing_faq_1.png'; +import clickpipesPricingFaq2 from '@site/static/images/cloud/manage/jan2025_faq/external_clickpipes_pricing_faq_2.png'; +import clickpipesPricingFaq3 from '@site/static/images/cloud/manage/jan2025_faq/external_clickpipes_pricing_faq_3.png'; + +
+ +Why are we introducing a pricing model for ClickPipes now? + +We decided to initially launch ClickPipes for free with the idea to gather +feedback, refine features, and ensure it meets user needs. +As the GA platform has grown, it has effectively stood the test of time by +moving trillions of rows. Introducing a pricing model allows us to continue +improving the service, maintaining the infrastructure, and providing dedicated +support and new connectors. + +
+ +

What are ClickPipes replicas?

ClickPipes ingests data from remote data sources via a dedicated infrastructure
that runs and scales independently of the ClickHouse Cloud service.
For this reason, it uses dedicated compute replicas.
The diagrams below show a simplified architecture.

For streaming ClickPipes, ClickPipes replicas access the remote data sources (e.g., a Kafka broker),
pull the data, process and ingest it into the destination ClickHouse service.

ClickPipes Replicas - Streaming ClickPipes

In the case of object storage ClickPipes,
the ClickPipes replica orchestrates the data loading task
(identifying files to copy, maintaining the state, and moving partitions),
while the data is pulled directly by the ClickHouse service.

ClickPipes Replicas - Object Storage ClickPipes

+ +
+ +What's the default number of replicas and their size? + +Each ClickPipe defaults to 1 replica that's provided with 2 GiB of RAM and 0.5 vCPU. +This corresponds to **0.25** ClickHouse compute units (1 unit = 8 GiB RAM, 2 vCPUs). + +
+ +
+ +Can ClickPipes replicas be scaled? + +Yes, ClickPipes for streaming can be scaled both horizontally and vertically. +Horizontal scaling adds more replicas to increase throughput, while vertical scaling increases the resources (CPU and RAM) allocated to each replica to handle more intensive workloads. +This can be configured during ClickPipe creation, or at any other point under **Settings** -> **Advanced Settings** -> **Scaling**. + +
+ +

How many ClickPipes replicas do I need?

It depends on the workload throughput and latency requirements.
We recommend starting with the default value of 1 replica, measuring your latency, and adding replicas if needed.
Keep in mind that for Kafka ClickPipes, you also have to scale the Kafka broker partitions accordingly.
The scaling controls are available under **Settings** for each streaming ClickPipe.

ClickPipes Replicas - How many ClickPipes replicas do I need?

+ +
+ +What does the ClickPipes pricing structure look like? + +It consists of two dimensions: +- **Compute**: Price per unit per hour + Compute represents the cost of running the ClickPipes replica pods whether they actively ingest data or not. + It applies to all ClickPipes types. +- **Ingested data**: per GB pricing + The ingested data rate applies to all streaming ClickPipes + (Kafka, Confluent, Amazon MSK, Amazon Kinesis, Redpanda, WarpStream, + Azure Event Hubs) for the data transferred via the replica pods. + The ingested data size (GB) is charged based on bytes received from the source (uncompressed or compressed). + +
+ +

What are the ClickPipes public prices?

- Compute: \$0.20 per unit per hour (\$0.05 per replica per hour)
- Ingested data: \$0.04 per GB

+ +

How does it look in an illustrative example?

For example, ingesting 1 TB of data over 24 hours using the Kafka connector with a single replica (0.25 compute unit) costs:

$$
(0.25 \times 0.20 \times 24) + (0.04 \times 1000) = \$41.2
$$

For object storage connectors (S3 and GCS),
only the ClickPipes compute cost is incurred since the ClickPipes pod is not processing data
but only orchestrating the transfer which is operated by the underlying ClickHouse service:

$$
0.25 \times 0.20 \times 24 = \$1.2
$$

+ +
+ +When does the new pricing model take effect? + +The new pricing model takes effect for all organizations created after January 27th, 2025. + +
+ +
+ +What happens to current users? + +Existing users will have a **60-day grace period** where the ClickPipes service continues to be offered for free. +Billing will automatically start for ClickPipes for existing users on **March 24th, 2025.** + +
+ +
+ +How does ClickPipes pricing compare to the market? + +The philosophy behind ClickPipes pricing is +to cover the operating costs of the platform while offering an easy and reliable way to move data to ClickHouse Cloud. +From that angle, our market analysis revealed that we are positioned competitively. + +