diff --git a/public/__redirects b/public/__redirects index 8bc2bfface64ae..1c3065c747a2ac 100644 --- a/public/__redirects +++ b/public/__redirects @@ -202,13 +202,14 @@ /support/analytics/learn-more/cloudflare-analytics-with-workers/ /analytics/account-and-zone-analytics/analytics-with-workers/ 301 /support/analytics/learn-more/how-does-cloudflare-calculate-total-threats-stopped/ /analytics/account-and-zone-analytics/total-threats-stopped/ 301 /support/analytics/learn-more/status-code-metrics-in-cloudflare-site-analytics/ /analytics/account-and-zone-analytics/status-codes/ 301 -/support/analytics/learn-more/understanding-cloudflare-network-analytics-v1/ /analytics/network-analytics/reference/network-analytics-v1/ 301 +/support/analytics/learn-more/understanding-cloudflare-network-analytics-v1/ /analytics/graphql-api/migration-guides/network-analytics-v2/ 301 /support/analytics/learn-more/what-are-the-types-of-threats/ /analytics/account-and-zone-analytics/threat-types/ 301 /analytics/graphql-api/tutorials/build-your-own-analytics/ /analytics/graphql-api/tutorials/ 301 /analytics/graphql-api/tutorials/export-graphql-to-csv/ /analytics/graphql-api/tutorials/ 301 /analytics/analytics-integrations/google-cloud/ /analytics/analytics-integrations/ 301 /analytics/dashboards/ /log-explorer/custom-dashboards/ 301 /analytics/analytics-integrations/looker/ /analytics/analytics-integrations/ 301 +/analytics/network-analytics/reference/network-analytics-v1/ /analytics/graphql-api/migration-guides/network-analytics-v2/ 301 # email-security /email-security/reporting/search/detection-search/ /email-security/reporting/search/ 301 diff --git a/src/content/changelog/workers/2025-09-09-interactive-wrangler-assets.mdx b/src/content/changelog/workers/2025-09-09-interactive-wrangler-assets.mdx new file mode 100644 index 00000000000000..6be3b71930d16d --- /dev/null +++ b/src/content/changelog/workers/2025-09-09-interactive-wrangler-assets.mdx @@ -0,0 +1,63 @@ +--- +title: Deploy static sites to Workers without a configuration file +description: Wrangler now guides you through deploying static assets with interactive prompts and automatic configuration generation when no wrangler.jsonc file exists. +products: + - workers +date: 2025-09-09 +--- + +Deploying a static site to Workers is now easier. When you run `wrangler deploy [directory]` or `wrangler deploy --assets [directory]` without an existing [configuration file](/workers/wrangler/configuration/), the [Wrangler CLI](/workers/wrangler/) now guides you through the deployment process with interactive prompts. 
+ +## Before and after + +**Before:** Required remembering multiple flags and parameters +```bash +wrangler deploy --assets ./dist --compatibility-date 2025-09-09 --name my-project +``` + +**After:** Simple directory deployment with guided setup +```bash +wrangler deploy dist +# Interactive prompts handle the rest as shown in the example workflow below +``` + +## What's new + +**Interactive prompts for missing configuration:** +- Wrangler detects when you're trying to deploy a directory of static assets +- Prompts you to confirm the deployment type +- Asks for a project name (with smart defaults) +- Automatically sets the compatibility date to today + +**Automatic configuration generation:** +- Creates a `wrangler.jsonc` file with your deployment settings +- Stores your choices for future deployments +- Eliminates the need to remember complex command-line flags + +## Example workflow + +```bash +# Deploy your built static site +wrangler deploy dist + +# Wrangler will prompt: +✔ It looks like you are trying to deploy a directory of static assets only. Is this correct? … yes +✔ What do you want to name your project? … my-astro-site + +# Automatically generates a wrangler.jsonc file and adds it to your project: +{ + "name": "my-astro-site", + "compatibility_date": "2025-09-09", + "assets": { + "directory": "dist" + } +} + +# Next time you run wrangler deploy, this will use the configuration in your newly generated wrangler.jsonc file +wrangler deploy +``` + +## Requirements + +- You must use Wrangler version 4.24.4 or later to use this feature + diff --git a/src/content/docs/analytics/network-analytics/index.mdx b/src/content/docs/analytics/network-analytics/index.mdx index aaea3da34b7695..1e7dc93a7ee895 100644 --- a/src/content/docs/analytics/network-analytics/index.mdx +++ b/src/content/docs/analytics/network-analytics/index.mdx @@ -27,5 +27,4 @@ For a technical deep-dive into Network Analytics, refer to our [blog post](https * [Cloudflare GraphQL API](/analytics/graphql-api/) * [Cloudflare Logpush](/logs/logpush/) -* [Migrating from Network Analytics v1 to Network Analytics v2](/analytics/graphql-api/migration-guides/network-analytics-v2/) -* [Cloudflare Network Analytics v1](/analytics/network-analytics/reference/network-analytics-v1/) +* [Migrating from Network Analytics v1 to Network Analytics v2](/analytics/graphql-api/migration-guides/network-analytics-v2/) \ No newline at end of file diff --git a/src/content/docs/analytics/network-analytics/reference/network-analytics-v1.mdx b/src/content/docs/analytics/network-analytics/reference/network-analytics-v1.mdx deleted file mode 100644 index 28085c8ef4f907..00000000000000 --- a/src/content/docs/analytics/network-analytics/reference/network-analytics-v1.mdx +++ /dev/null @@ -1,330 +0,0 @@ ---- -pcx_content_type: reference -source: https://support.cloudflare.com/hc/en-us/articles/360038696631-Understanding-Cloudflare-Network-Analytics-v1 -title: Network Analytics v1 -sidebar: - order: 3 - badge: - text: Deprecated -head: - - tag: title - content: Cloudflare Network Analytics v1 (deprecated) -description: Network Analytics v1 (deprecated) provides near real-time - visibility into network and transport-layer traffic patterns and DDoS attacks. - ---- - -import { GlossaryTooltip, Render } from "~/components" - -:::caution - - -* -* If you are using the Network Analytics GraphQL API, you should migrate from NAv1 to NAv2 by following the [migration guide](/analytics/graphql-api/migration-guides/network-analytics-v2/). 
- ::: - -Access to Network Analytics requires the following: - -* A Cloudflare Enterprise plan -* Cloudflare [Magic Transit](/magic-transit/) or [Spectrum](/spectrum/). - -Cloudflare’s **Network Analytics** view provides near real-time visibility into network and transport-layer traffic patterns and DDoS attacks. Network Analytics visualizes packet and bit-level data, the same data available via the [GraphQL Analytics API](/analytics/graphql-api/). - -![Analytics panel showing packets summary per type](~/assets/images/analytics/network-analytics/v1-main-dashboard.png) - -Network Analytics accelerates reporting and investigation of malicious traffic. You can filter data by these parameters: - -* Mitigation action taken by Cloudflare -* Source IP, port, ASN -* Destination IP and port -* The Cloudflare data center city and country of where the traffic was observed -* Attack size, type, rate, and duration -* TCP flag  -* IP version -* Protocol - -Use Network Analytics to quickly identify key intelligence: - -* Top attack vectors targeting the network  -* Traffic mitigation over time, broken down by action  -* Attack source, by country or data center - -*** - -## Access Network Analytics v1 - -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select an account that has access to Magic Transit or Spectrum. -2. Go to Account Home > **Analytics & Logs** > **Network Analytics**. - -*** - -## Navigate Network Analytics v1 - -### Headline summary and side panels - -The headline and the side panels provide a summary of activity over the period selected in the **timeframe** drop-down list. - -![Headline and side panel summarizing activity over the last 24 hours](~/assets/images/analytics/network-analytics/v1-navigate.png) - -The headline provides the total packets or bits and the number of attacks detected and mitigated. When there is an attack in progress, the headline displays the maximum rate of packets (or bits) rather than the total count. - -To toggle your view of the data, select the **Packets** or **Bits** side panels. - -### Set the timeframe for the view - -Use the **timeframe** drop-down list to change the time range over which Network Analytics displays data. When you select a timeframe, the entire view is updated to reflect your choice. - -When you select *Last 30 minutes*, the **Network Analytics** view displays the data from the last 30 minutes, refreshing every 20 seconds. A *Live* notification appears next to the statistic drop-down list to let you know that the view keeps updating automatically: - -![Auto-refresh enabled in Network Analytics](~/assets/images/analytics/network-analytics/v1-auto-refresh.png) - -When you select the *Custom range* option, you can specify a time range of up to 30 days throughout any period during the last 365 days. - -:::note - -Source IPs are stored for 30 days. Report periods older than 30 days do not include source IP data. -::: - -### View by average rate or total volume  - -Choose a statistic from the drop-down list to toggle between plotting *Average rate* and *Total count*.  - -### Show IP prefix advertisement/withdrawal events - -Enable the **Show annotations** toggle to show or hide annotations for advertised/withdrawn IP prefix events in the **Network Analytics** view. Select each annotation to get more details. 
- -![Toggle button for displaying annotations in Network Analytics chart](~/assets/images/analytics/network-analytics/v1-show-annotations.png) - -### Zoom into the Packets summary  - -Select and drag to zoom in on a region of the chart. Using this technique you can zoom into a time range as short as three minutes. - -![Zooming into the Packets summary ](/images/analytics/network-analytics/v1-zoom-in.gif) - -To zoom out, select **X** in the **time range** selector. - -*** - -## Apply filters to data - -You can apply multiple filters and exclusions to adjust the scope of the data displayed in Network Analytics. Filters affect all the data displayed in the Network Analytics page. - -There are two ways to filter Network Analytics data: use the **Add filter** button or select one of the stat filters. - -### Use the Add filter button - -Select **Add filter** to open the **New filter** popover. Specify a field, an operator, and a value to complete your filter expression. Select **Apply** to update the view. - -When applying filters, observe these guidelines: - -* Wildcards are not supported. -* You do not need to wrap values in quotes. -* When specifying an ASN number, leave out the *AS* prefix. For example, enter *1423* instead of *AS1423*. - -### Use a stat filter - -To filter based on the type of data associated with one of the Network Analytics stats, use the **Filter** and **Exclude** buttons that display when you hover your pointer over the stat.  - -In this example, selecting **Filter** narrows the scope of the view to only traffic associated with the *Allow* action. - -### Create a Magic Firewall rule from the applied filters - -:::note - -This feature is only available for Magic Transit users. -::: - -You can create a [Magic Firewall](/magic-firewall/) rule that blocks all traffic matching the selected filters in Network Analytics. The currently supported filters are: - -* Destination IP -* Protocol -* Source data center -* Source IP -* TCP flags - -Other types of Network Analytics filters will not be added to the new rule definition. However, you can further configure the rule in Magic Firewall. - -Do the following: - -1. Apply one or more filters in Network Analytics. - -2. Select **Create Magic Firewall rule**. - - ![Link in Network Analytics to create a Magic Firewall rule](~/assets/images/analytics/network-analytics/v1-create-magic-firewall-rule.png) - - The Magic Firewall rule editor displays with the selected filters and values. - -3. Review the rule definition in the Magic Firewall rule editor. - -4. Select **Add new**. - -### Supported filter fields, operators, and values  - -The table below shows the range of fields, operators, and values you can use to filter Network Analytics. 
- - - -| Field | Operators | Value | -| -------------------- | ----------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Action | Equals
Does not equal | **Allow:** Traffic allowed through Cloudflare's automated DDoS protection systems. May also include traffic mitigated by Firewall Rules, flowtrackd and L7 rules.
**Block:** Traffic blocked by Cloudflare's automated DDoS protection systems.
**Connection-tracking:** Applies only exclusively to L7, as Magic Transit is excluded from scope and no conntrack ever runs for Magic Transit prefixes.
**Rate-limit:** Can be applied per source IP, subnet or any connection. The decision is made programmatically based on heuristics.
**Monitor:** Attacks which were identified but have chosen to simply observe and not mitigate with any rule. | -| Attack ID | Equals
Does not equal | Attack number | -| Attack Type | Equals
Does not equal | UDP Flood
SYN Flood
ACK Flood
RST Flood
LDAP Flood
Christmas Flood
FIN Flood
GRE Flood
ICMP Flood | -| Destination IP | Equals
Does not equal | IP address | -| Destination Port | Equals
Does not equal
Greater than
Greater than or equals
Less than
Less than or equals | Port number Port range | -| Destination IP range | Equals Does not equal | IP range & mask | -| IP Version | Equals Does not equal | 4 or 6 | -| Protocol | Equals Does not equal | TCP
UDP
ICMP
GRE | -| Source ASN | Equals Does not equal | AS Number | -| Source Country | Equals Does not equal | Country name | -| Source data center | Equals Does not equal | Data center location | -| Source IP | Equals Does not equal | IP address | -| Source port | Equals Does not equal
Greater than
Greater than or equals
Less than
Less than or equals | Port number
Port range | -| TCP Flag | Equals
Does not equal
Contains | SYN, SYN-ACK, FIN, ACK, RST | - - - -*** - -## Select a dimension to plot - -You can plot Network Analytics data along a variety of dimensions. By default, Network Analytics displays data broken down by Action. - -Select one of the **Summary** tabs to view the data along a different dimension. - -You can choose from these options:  - -* Action -* Attack type -* Destination IP -* Destination port -* IP version -* Protocol -* Source ASN -* Source country -* Source data center -* Source IP -* Source port -* TCP flag - -:::note - -Data for source ASN, source IP, source port, and TCP flag is only available over the last 24 hours. -::: - -### Share Network Analytics filters  - -When you add filters and specify a time range in the Network Analytics page, the URL changes to reflect those parameters. - -To share your view of the data, copy the URL and send it to other users so that they can work with the same view. - -![Selecting the URL of the Network Analytics page](~/assets/images/analytics/network-analytics/v1-share-url.png) - -*** - -## View the Activity log - -The Network Analytics **Activity log** shows up to 500 log events in the currently selected time range, paginated with 10 results per page per time range view. (The [GraphQL Analytics API](/analytics/graphql-api/) does not have this limitation.)  - -To display event details, select the expansion widget associated with the events. - -### Configure columns - -To configure which columns display in the Activity log, select **Edit columns**.  - -This is particularly useful when you would like to identify a DDoS attack, during which you can specify the desired attributes such as IP addresses, max bit rate, and attack ID among others. - -### View top items - -The **Source Country,** **Source**, and **Destination** panels display the top items in each view. - -To select the number of items to display, use the drop-down list associated with the view. - -To review the top data centers, select *Data center* from the drop-down list in the **Source country** view. The **Source data center** view replaces the **Source country** view. - -*** - -## Export log data and reports - -### Export activity log data  - -You can export up to 500 raw events from the Activity log at a time. This option is useful when you need to combine and analyze Cloudflare data with data stored in a separate system or database, such as a SIEM system. - -To export log data, select **Export**. - -Choose either CSV or JSON format for rendering exported data. The downloaded file name will reflect the selected time range, using this pattern: - -```txt -network-analytics-attacks-[start_time]-[end_time].json -``` - -### Export a Network Analytics report  - -To print or download a snapshot report from **Network Analytics**, select **Print report**. Your web browser's print interface displays options for printing or saving as a PDF. - -*** - -## Limitations - -Network Analytics currently has these limitations: - -* Network Analytics v1 provides insights on [denial of service daemon (dosd)](https://blog.cloudflare.com/who-ddosd-austin/) attacks. Although it provides a timely view of the data, it does not have a complete view of all events.  
- -* The following data sources are not available in Network Analytics v1: - - * Firewall Rules (available in [Network Analytics v2](/analytics/network-analytics/)) - * Application layer rules - * Gatekeeper and manually applied rules - * [flowtrackd](https://blog.cloudflare.com/announcing-flowtrackd/) (Advanced TCP protection) (available in Network Analytics v2) - * WARP traffic and [Orange-clouded traffic](/fundamentals/concepts/how-cloudflare-works/) - -* Data from Cloudflare services that proxy traffic, such as CDN, is not available in Network Analytics. - -*** - -## Frequently asked questions - -### How long does Cloudflare retain data in the Network Analytics portal? - -If you are using Network Analytics v2 (NAv2), the range of historical data you can query is **90 days**. - -Network Analytics v1 (NAv1) uses GraphQL nodes to roll up data into 1 minute, 1 hour, and 1 day IP flows. For example, the ipFlows1mGroups node stores data in minute-wise aggregations. - -To identify the range of historical data you can query in NAv1, refer to this table. Use the ***notOlderThan*** column as an indicator of retention time. - -GraphQL data nodes: - -* `ipFlows1mGroups` - * **maxDuration**[^1]: 25 hours - * **notOlderThan**[^2]: 30 days - * **Time range selection** and **Number of data points**: - * 30 minutes - 30 data points - * 6 hours - 71 data points - * 12 hours - 48 data points - * 24 hours - 96 data points - * 1 week - 168 data points -* `ipFlows1dGroups` - * **maxDuration**[^1]: 6 months - * **notOlderThan**[^2]: 1 year - * **Time range selection** and **Number of data points**: - * 30 minutes - 30 data points - * 6 hours - 71 data points - * 12 hours - 48 data points - * 24 hours - 96 data points - * 1 week - 168 data points - -[^1]: maxDuration defines the time window that can be requested in one query (varies by data node). - -[^2]: notOlderThan limits how far back in the record a query can search. It is indicative of how long the data stays in our database. - -When working with attack logs in the dashboard, keep the following in mind: - -* Attack logs are stored with start and end timestamps, packet and bit statistics for minimum, maximum, and average data rate, as well as totals, attack type, and action taken.  -* Source IP addresses are considered personally identifiable information. Therefore, Cloudflare only stores them for 30 days. After 30 days, source IP addresses are discarded, and the logs are rolled up first into 1-hour groups, then 1-day groups. The 1-hour rollups are stored for 6 month. The one day rollups are stored for 1 year. - -For more information on querying and accessing log data, refer to the [GraphQL Analytics API](/analytics/graphql-api/limits).  - -### Why does Network Analytics say the destination IP is “unavailable”? - -The destination IP is indicated as *Unavailable*, when the destination IP was not included in the real-time signature generated by our [DDoS protection systems](/ddos-protection/).  - -To view the destination IP, filter by **Attack ID** and scroll to the **Destination** section in the top items lists. When you filter on a specific Attack ID, the entire Network Analytics dashboard becomes an attack report. 
diff --git a/src/content/docs/browser-rendering/platform/browser-close-reasons.mdx b/src/content/docs/browser-rendering/platform/browser-close-reasons.mdx index 8677d0bf70695a..2960697e83003e 100644 --- a/src/content/docs/browser-rendering/platform/browser-close-reasons.mdx +++ b/src/content/docs/browser-rendering/platform/browser-close-reasons.mdx @@ -5,10 +5,19 @@ sidebar: order: 30 --- +import { DashButton } from "~/components"; A browser session may close for a variety of reasons, occasionally due to connection errors or errors in the headless browser instance. As a best practice, wrap `puppeteer.connect` or `puppeteer.launch` in a [`try/catch`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/try...catch) statement. -The reason that a browser closed can be found on the Browser Rendering Dashboard in the [logs tab](https://dash.cloudflare.com/?to=/:account/workers/browser-renderingl/logs). When Cloudflare begins charging for the Browser Rendering API, we will not charge when errors are due to underlying Browser Rendering infrastructure. +To find the reason that a browser closed: + +1. In the Cloudflare dashboard, go to the **Browser Rendering** page. + + + +2. Select the **Logs** tab. + +When Cloudflare begins charging for the Browser Rendering API, we will not charge when errors are due to underlying Browser Rendering infrastructure. | Reasons a session may end | | ---------------------------------------------------- | diff --git a/src/content/docs/browser-rendering/platform/pricing.mdx b/src/content/docs/browser-rendering/platform/pricing.mdx index be0e44665b9071..b87b710bdc75ba 100644 --- a/src/content/docs/browser-rendering/platform/pricing.mdx +++ b/src/content/docs/browser-rendering/platform/pricing.mdx @@ -4,7 +4,7 @@ title: Pricing sidebar: order: 31 --- -import { Details } from "~/components" +import { DashButton } from "~/components" There are two ways to use Browser Rendering. Depending on the method you use, here is how billing works: - [**REST API**](/browser-rendering/rest-api/): Charged for **Duration** only ($/browser hour) @@ -15,14 +15,6 @@ There are two ways to use Browser Rendering. Depending on the method you use, he | **Workers Free** | 10 minutes per day | 3 concurrent browsers | N/A | | **Workers Paid** | 10 hours per month | 10 concurrent browsers (averaged monthly) | **1. REST API**: $0.09 per additional browser hour
**2. Workers Bindings**: $0.09 per additional browser hour
$2.00 per additional concurrent browser | -
-You can monitor your Browser Rendering usage in the [Cloudflare dashboard](https://dash.cloudflare.com). Go to **Compute (Workers)** > **Browser Rendering**. -
- -
-Cloudflare calculates concurrent browsers as the **monthly average of your daily peak usage**. In other words, we record **the peak number of concurrent browsers each day** and then average those values over the month. This approach reflects your typical traffic and ensures you are not disproportionately charged for brief spikes in browser concurrency. -
- ## Examples of Workers Paid pricing
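As a hypothetical illustration of how the averaged concurrency is derived: if your daily peak is 12 concurrent browsers on 22 days of the month and 6 concurrent browsers on the remaining 8 days, the monthly figure is (22 × 12 + 8 × 6) ÷ 30 ≈ 10.4 concurrent browsers, and only the portion above the 10 browsers included with Workers Paid is subject to the $2.00 per additional concurrent browser rate.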
#### Example: REST API pricing @@ -52,7 +44,14 @@ For **browser duration** and **concurrent browsers**: ### How do I estimate my Browser Rendering costs? -You can monitor your Browser Rendering usage in the [Cloudflare dashboard](https://dash.cloudflare.com). Go to **Compute (Workers)** > **Browser Rendering**. Then, you can use [the pricing page](/browser-rendering/platform/pricing/) to estimate your costs. +To monitor your Browser Rendering usage in the Cloudflare dashboard, go to the **Browser Rendering** page. + + + +Then, you can use [the pricing page](/browser-rendering/platform/pricing/) to estimate your costs. ### Do failed API calls, such as those that time out, add to billable browser hours? No. If a request to the Browser Rendering REST API fails with a `waitForTimeout` error, the browser session is not charged. + +### How is the number of concurrent browsers calculated? +Cloudflare calculates concurrent browsers as the **monthly average of your daily peak usage**. In other words, we record **the peak number of concurrent browsers each day** and then average those values over the month. This approach reflects your typical traffic and ensures you are not disproportionately charged for brief spikes in browser concurrency. \ No newline at end of file diff --git a/src/content/docs/browser-rendering/rest-api/index.mdx b/src/content/docs/browser-rendering/rest-api/index.mdx index 75b52573e51988..936520ff9a3b2e 100644 --- a/src/content/docs/browser-rendering/rest-api/index.mdx +++ b/src/content/docs/browser-rendering/rest-api/index.mdx @@ -4,6 +4,7 @@ title: REST API sidebar: order: 2 --- +import { DashButton } from "~/components"; The REST API is a RESTful interface that provides endpoints for common browser actions such as capturing screenshots, extracting HTML content, generating PDFs, and more. The following are the available options: @@ -22,8 +23,9 @@ Before you begin, make sure you [create a custom API Token](/fundamentals/api/ge :::note[Note] -You can monitor Browser Rendering usage in two ways: -- [Cloudflare dashboard](https://dash.cloudflare.com/?to=/:account/workers/browser-rendering): View aggregate metrics, including total REST API requests and total browser hours used. -- `X-Browser-Ms-Used` header: Returned in every REST API response, reporting browser time used for that request (in milliseconds). +You can monitor Browser Rendering usage in two ways: +- In the Cloudflare dashboard, go to the **Browser Rendering** page to view aggregate metrics, including total REST API requests and total browser hours used. + +- `X-Browser-Ms-Used` header: Returned in every REST API response, reporting browser time used for that request (in milliseconds). ::: diff --git a/src/content/docs/cache/how-to/edge-browser-cache-ttl/set-browser-ttl.mdx b/src/content/docs/cache/how-to/edge-browser-cache-ttl/set-browser-ttl.mdx index 7de3391d9efb34..8414faa626b9af 100644 --- a/src/content/docs/cache/how-to/edge-browser-cache-ttl/set-browser-ttl.mdx +++ b/src/content/docs/cache/how-to/edge-browser-cache-ttl/set-browser-ttl.mdx @@ -19,8 +19,6 @@ Nevertheless, the value you set via Cache Rule will be ignored if `Cache-Control ## Set Browser Cache TTL -The Cloudflare UI and [API](/api/resources/zones/subresources/settings/methods/edit/) both prohibit setting Browser Cache TTL to 0 for non-Enterprise domains. - :::note[Note] @@ -34,11 +32,3 @@ If you modify cached assets, the new asset is not displayed to repeat visitors b 3. 
Under **Browser Cache TTL**, select the drop-down menu to select the desired cache expiration time. The **Respect Existing Headers** option tells Cloudflare to honor the settings in the `Cache-Control` headers from your origin web server. - -:::note[Respect Existing Headers Availability] - - -For all plan types, you can set Browser Cache TTL to Respect Existing Headers at a zone level (zone setting) or you can use a Cache Rule. - - -::: diff --git a/src/content/docs/dns/troubleshooting/faq.mdx b/src/content/docs/dns/troubleshooting/faq.mdx index c597a87707c0d8..d6c9b291e37294 100644 --- a/src/content/docs/dns/troubleshooting/faq.mdx +++ b/src/content/docs/dns/troubleshooting/faq.mdx @@ -241,7 +241,21 @@ example.com CNAME example.com.cdn.cloudflare.net ## Why am I getting a warning for hostname not covered even if I have a custom certificate? If the [custom certificate](/ssl/edge-certificates/custom-certificates/) has been in place before our new certificate management pipeline, the following warning is displayed but can be discarded. - `This hostname is not covered by a certificate.` The warning will be gone when you upload a new custom certificate, or start using another type of certificate for this hostname. + + +--- + +## I've updated my CNAME to a new SaaS provider, but I still see content from the old provider + +When a SaaS provider is leveraging our [Cloudflare for SaaS](/cloudflare-for-platforms/cloudflare-for-saas/) solution, they create a [Custom Hostname](/cloudflare-for-platforms/cloudflare-for-saas/domain-support/) on their Cloudflare zone. +Then a [CNAME record needs to be created](/cloudflare-for-platforms/cloudflare-for-saas/saas-customers/how-it-works/) on the client zone, to point to the SaaS provider service. +When changing SaaS providers, if the old SaaS provider provisioned a specific custom hostname for the record (`mystore.example.com`) and the new SaaS provider provisioned a wildcard custom hostname (`*.example.com`), the old custom hostname will still take precedence. +This is expected as per the [Certificate and hostname priority](https://developers.cloudflare.com/ssl/reference/certificate-and-hostname-priority/#hostname-priority). + +In this case there are 2 ways forward: +- (*Recommended*) Ask the new SaaS provider to provision a specific custom hostname for you instead of the wildcard (`mystore.example.com` instead of `*.example.com`). +- Ask the Super Administrator of your account to contact [Cloudflare Support](/support/contacting-cloudflare-support/) to request an update of the SaaS configuration. + diff --git a/src/content/docs/hyperdrive/get-started.mdx b/src/content/docs/hyperdrive/get-started.mdx index b1590edae7bc50..ed9a23a8042374 100644 --- a/src/content/docs/hyperdrive/get-started.mdx +++ b/src/content/docs/hyperdrive/get-started.mdx @@ -21,7 +21,7 @@ This guide will instruct you through: :::note -Hyperdrive currently works with PostgreSQL, MySQL and many compatible databases. This includes CockroachDB and Materialize (which are PostgreSQL-compatible), and Planetscale. +Hyperdrive currently works with PostgreSQL, MySQL and many compatible databases. This includes CockroachDB and Materialize (which are PostgreSQL-compatible), and PlanetScale. Learn more about the [databases that Hyperdrive supports](/hyperdrive/reference/supported-databases-and-features). 
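As a preview of where this guide ends up: once a Hyperdrive configuration exists and is bound to your Worker, queries are routed through the binding's connection string. The following is a minimal sketch, assuming a binding named `HYPERDRIVE` and the `postgres` (postgres.js) driver; adjust both to match your own setup.

```ts
import postgres from "postgres";

export interface Env {
	// The binding name is an assumption; it must match the Hyperdrive binding in your Wrangler config.
	HYPERDRIVE: Hyperdrive;
}

export default {
	async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
		// Hyperdrive exposes a connection string that routes queries through its connection pool.
		const sql = postgres(env.HYPERDRIVE.connectionString);

		// Illustrative query only; replace it with queries against your own schema.
		const rows = await sql`SELECT now() AS server_time`;

		// Clean up the driver's connections after the response has been sent.
		ctx.waitUntil(sql.end());

		return Response.json(rows);
	},
} satisfies ExportedHandler<Env>;
```

Passing `sql.end()` to `ctx.waitUntil()` lets the Worker return the response immediately while connection cleanup finishes in the background.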
diff --git a/src/content/docs/hyperdrive/index.mdx b/src/content/docs/hyperdrive/index.mdx index 5c0d2846dbcfac..b980519ce6b3a7 100644 --- a/src/content/docs/hyperdrive/index.mdx +++ b/src/content/docs/hyperdrive/index.mdx @@ -32,7 +32,7 @@ Turn your existing regional database into a globally distributed database. Hyperdrive is a service that accelerates queries you make to existing databases, making it faster to access your data from across the globe from [Cloudflare Workers](/workers/), irrespective of your users' location. -Hyperdrive supports any Postgres or MySQL database, including those hosted on AWS, Google Cloud, Azure, Neon and Planetscale. Hyperdrive also supports Postgres-compatible databases like CockroachDB and Timescale. +Hyperdrive supports any Postgres or MySQL database, including those hosted on AWS, Google Cloud, Azure, Neon and PlanetScale. Hyperdrive also supports Postgres-compatible databases like CockroachDB and Timescale. You do not need to write new code or replace your favorite tools: Hyperdrive works with your existing code and tools you use. Use Hyperdrive's connection string from your Cloudflare Workers application with your existing Postgres drivers and object-relational mapping (ORM) libraries: diff --git a/src/content/docs/hyperdrive/reference/supported-databases-and-features.mdx b/src/content/docs/hyperdrive/reference/supported-databases-and-features.mdx index 8c757b75675bf4..d6d40454103bb0 100644 --- a/src/content/docs/hyperdrive/reference/supported-databases-and-features.mdx +++ b/src/content/docs/hyperdrive/reference/supported-databases-and-features.mdx @@ -30,7 +30,7 @@ Hyperdrive also supports databases that are compatible with the Postgres or MySQ | Timescale | ✅ | All | See the [Timescale guide](/hyperdrive/examples/connect-to-postgres/postgres-database-providers/timescale/) to connect. | | Materialize | ✅ | All | Postgres-compatible. Refer to the [Materialize guide](/hyperdrive/examples/connect-to-postgres/postgres-database-providers/materialize/) to connect. | | CockroachDB | ✅ | All | Postgres-compatible. Refer to the [CockroachDB](/hyperdrive/examples/connect-to-postgres/postgres-database-providers/cockroachdb/) guide to connect. | -| Planetscale | ✅ | All | PlanetScale provides MySQL-compatible and PostgreSQL databases | +| PlanetScale | ✅ | All | PlanetScale provides MySQL-compatible and PostgreSQL databases | | MariaDB | ✅ | All | MySQL-compatible. 
| ## Supported TLS (SSL) modes diff --git a/src/content/docs/learning-paths/workers/get-started/first-application.mdx b/src/content/docs/learning-paths/workers/get-started/first-application.mdx index b2cd561a284cdf..9d69e0693aa598 100644 --- a/src/content/docs/learning-paths/workers/get-started/first-application.mdx +++ b/src/content/docs/learning-paths/workers/get-started/first-application.mdx @@ -7,6 +7,7 @@ sidebar: order: 3 --- +import { DashButton } from "~/components"; ## Build an AI application with Hono and Workers diff --git a/src/content/docs/learning-paths/workers/get-started/first-worker.mdx b/src/content/docs/learning-paths/workers/get-started/first-worker.mdx index ed3a3262ec3904..8e1bb44fffa155 100644 --- a/src/content/docs/learning-paths/workers/get-started/first-worker.mdx +++ b/src/content/docs/learning-paths/workers/get-started/first-worker.mdx @@ -5,7 +5,7 @@ sidebar: order: 2 --- -import { Render, PackageManagers } from "~/components"; +import { Render, PackageManagers, DashButton } from "~/components"; ## Build and deploy your first Worker diff --git a/src/content/docs/network-interconnect/get-started.mdx b/src/content/docs/network-interconnect/get-started.mdx index 7d76e439f4f660..16d6f8a45eef21 100644 --- a/src/content/docs/network-interconnect/get-started.mdx +++ b/src/content/docs/network-interconnect/get-started.mdx @@ -53,6 +53,7 @@ Consider the following service levels when planning your deployment: - **Observability**: There is no visibility of the interconnect config/status within the Cloudflare dashboard. - **Availability**: While network-resilient locations are designed to maintain connectivity during maintenance, single-homed locations can experience full service disruption. - **Backup Connectivity**: You are required to maintain alternative Internet connectivity as a backup for all CNI implementations. +- **BGP**: Customers must have a BGP session established for Dataplane 1.0/1.1 to be operational. ## Location Alignment @@ -66,7 +67,7 @@ Cloudflare partners with leading global providers, including: Console Connect, C ## End-to-End Implementation Workflow -The process of provisioning a CNI can take several weeks, depending on the complexity and third-party provider timelines. The most common delays occur during the physical connection phase, which is outside of Cloudflare's direct control. +The process of provisioning a CNI typically takes two to four weeks, depending on the complexity of implementation and third-party provider timelines. The most common delays occur during the physical connection phase, which is outside of Cloudflare's direct control. 1. **Submit Request**: Work with your account team to create a CNI request ticket, providing your desired CNI type, location, use case, and technical details. An Implementation Manager will be assigned to guide the process. 2. **Review Configuration**: The Implementation Manager will provide a detailed configuration document covering IP addressing, VLANs, and other technical specifications. You must review and approve this document. @@ -79,7 +80,7 @@ The process of provisioning a CNI can take several weeks, depending on the compl 7. [Add maintenance notifications](/network-interconnect/monitoring-and-alerts/#enable-cloudflare-status-maintenance-notification). 8. Enable tunnel health checks for Magic [Transit](/magic-transit/how-to/configure-tunnel-endpoints/#add-tunnels) / [WAN](/magic-wan/configuration/manually/how-to/configure-tunnel-endpoints/#add-tunnels). 
-## How-To Guides +## How-To guides ### How-To: Provision a Direct Interconnect @@ -88,41 +89,41 @@ The process of provisioning a CNI can take several weeks, depending on the compl - required port speeds (10G or 100G) - BGP ASN for Peering/Magic Transit - BGP password (optional) -2. **Order Cross-Connect**: Cloudflare will issue a Letter of Authorization (LOA). This document grants you permission to order a physical cross-connect between your equipment and a specific port on Cloudflare's hardware within the data center. This process can take one to two weeks or more, depending on the facility provider. Cloudflare's demarcation is the port that is specified in the LOA: you are responsible for the deployment, provisioning and ongoing support and operation of this connection and the commercial relationships with the facility provider and any third-party connectivity providers. +2. **Order Cross-Connect**: Cloudflare will issue a Letter of Authorization (LOA). This document grants you permission to order a physical cross-connect between your equipment and a specific port on Cloudflare's hardware within the data center. The end-to-end process for ordering a cross-connect can take one to two weeks or more, depending on the facility provider. Cloudflare's demarcation is the port that is specified in the LOA: you are responsible for the deployment, provisioning and ongoing support and operation of this connection, and the commercial relationships with the facility provider and any third-party connectivity providers. ### How-To: Provision a Partner Interconnect Cloudflare partners with leading connectivity providers globally. To provision a Partner Interconnect, you will initiate a connection request from your chosen provider's administrative portal. Cloudflare will then review and accept the request to activate the virtual circuit. -### How-To: Configure BGP and Routing +### How-To: Configure BGP and routing Once your physical cross-connect or virtual circuit is provisioned, the next phase is to configure IP routing using Border Gateway Protocol (BGP). This process typically takes about one week to complete. -#### Step 1: IP Address Provisioning +#### Step 1: IP Address provisioning 1. Cloudflare will send you a set of IPv4 and IPv6 addresses for your connection. 2. Assign the provided IPs to your router's interface that connects to Cloudflare. 3. Perform ping tests between your router and Cloudflare's router to confirm that the physical or virtual link is active and passing packets correctly. - **For Partner Interconnects**: If you are using a partner like Megaport, ensure you have configured the correct VLAN provided by your Customer Success Manager, as an incorrect VLAN can cause IP provisioning to fail. -#### Step 2: BGP Session Establishment +#### Step 2: BGP session establishment After you confirm connectivity with successful ping tests, the next step is to establish the BGP session. 1. Cloudflare will configure its side of the BGP session, and notify you once ready. -2. You will configure your side of the BGP session and accept the routes. +2. You will configure your side of the BGP session and accept the routes you need. 3. Once the session is established, traffic will begin to flow over the CNI. Contact your solutions engineer to verify that traffic is routing as expected. 
-#### BGP Configuration Options and Use Cases +#### BGP configuration options and use cases Depending on the Cloudflare services you use, your BGP configuration may vary: -- **Standard Peering**: This is the most common scenario, where BGP is used to exchange routes between your network and Cloudflare. Cloudflare learns your network routes, which is useful for services like CDN-only deployments or on-demand Magic Transit. It is important to note that prefixes Cloudflare learns via CNI remain local to that specific data center and are not propagated to other Cloudflare locations. +- **Standard Peering**: This is the most common scenario, where BGP is used to exchange routes between your network and Cloudflare. Cloudflare learns your network routes, which is useful for services like CDN-only deployments or on-demand Magic Transit. It is important to note that this is not peering with the Magic Transit routing table, which is global. Instead, this is peering with the specific data center's Internet edge network. This means that prefixes Cloudflare learns via CNI remain local to that specific data center and are not propagated to other Cloudflare locations. - **Magic Transit with Controlled Advertisement**: Magic Transit customers can use a second BGP session to control which prefixes are advertised to the Internet. In this setup, Cloudflare advertises no prefixes to you, and you advertise only the specific prefixes you want Cloudflare to announce on your behalf. -#### Important Note on Accepting Routes from Cloudflare +#### Important note on accepting routes from Cloudflare -If you wish to use the CNI for egress traffic from your network to Cloudflare-advertised prefixes (such as anycast or BYOIP addresses), you can accept the BGP prefixes you receive from Cloudflare (typically there will be around 4,000 routes advertised by Cloudflare). However, be aware that there is a 1 Gbps capacity limitation for traffic you send to Cloudflare over the CNI link. +If you wish to use the CNI for egress traffic from your network to Cloudflare-advertised prefixes (such as anycast or BYOIP addresses), you can accept the BGP prefixes you receive from Cloudflare (typically there will be around 4,000 to 6,000 routes advertised by Cloudflare). #### Optional: Bidirectional Forwarding Detection (BFD) diff --git a/src/content/docs/waf/change-log/general-updates.mdx b/src/content/docs/waf/change-log/general-updates.mdx index fdf49df643998c..f387399e8fe7e6 100644 --- a/src/content/docs/waf/change-log/general-updates.mdx +++ b/src/content/docs/waf/change-log/general-updates.mdx @@ -6,6 +6,7 @@ sidebar: head: - tag: title content: Changelog for general WAF updates +tableOfContents: false --- import { ProductChangelog } from "~/components"; @@ -14,4 +15,4 @@ import { ProductChangelog } from "~/components"; ## Previous updates -For preview WAF updates, refer to the [historical changelog (2024)](/waf/change-log/historical-2024/). \ No newline at end of file +For preview WAF updates, refer to the [historical changelog (2024)](/waf/change-log/historical-2024/). 
diff --git a/src/content/docs/workers/ci-cd/builds/configuration.mdx b/src/content/docs/workers/ci-cd/builds/configuration.mdx index db7014a6ccca1b..e575732c0cb20d 100644 --- a/src/content/docs/workers/ci-cd/builds/configuration.mdx +++ b/src/content/docs/workers/ci-cd/builds/configuration.mdx @@ -6,7 +6,7 @@ sidebar: order: 2 --- -import { DirectoryListing, Render, Tabs, TabItem } from "~/components"; +import { DirectoryListing, Render, Tabs, TabItem, DashButton } from "~/components"; When connecting your Git repository to your Worker, you can customize the configurations needed to build and deploy your Worker. @@ -89,10 +89,12 @@ You can provide custom environment variables to your build. To add environment variables via the dashboard: - 1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. - 2. In **Account Home**, select **Workers & Pages**. - 3. In **Overview**, select your Worker. - 4. Select **Settings** > **Environment variables**. + 1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + + 2. In **Overview**, select your Worker. + 3. Select **Settings** > **Environment variables**. To add env variables using Wrangler, define text and JSON via the `[vars]` configuration in your Wrangler file. diff --git a/src/content/docs/workers/ci-cd/external-cicd/github-actions.mdx b/src/content/docs/workers/ci-cd/external-cicd/github-actions.mdx index 0dd072dcf9b354..7da89c8c4a3f34 100644 --- a/src/content/docs/workers/ci-cd/external-cicd/github-actions.mdx +++ b/src/content/docs/workers/ci-cd/external-cicd/github-actions.mdx @@ -3,6 +3,7 @@ pcx_content_type: how-to title: GitHub Actions description: Integrate Workers development into your existing GitHub Actions workflows. --- +import { DashButton } from "~/components"; You can deploy Workers with [GitHub Actions](https://github.com/marketplace/actions/deploy-to-cloudflare-workers-with-wrangler). Here is how you can set up your GitHub Actions workflow. @@ -17,12 +18,13 @@ To find your Cloudflare account ID, refer to [Find account and zone IDs](/fundam ### API token To create an API token to authenticate Wrangler in your CI job: +1. In the Cloudflare dashboard, go to the **Account API tokens** page. -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com). -2. Select **Manage Account** > **Account API Tokens**. -3. Select **Create Token** > find **Edit Cloudflare Workers** > select **Use Template**. -4. Customize your token name. -5. Scope your token. + + +2. Select **Create Token** > find **Edit Cloudflare Workers** > select **Use Template**. +3. Customize your token name. +4. Scope your token. You will need to choose the account and zone resources that the generated API token will have access to. We recommend scoping these down as much as possible to limit the access of your token. For example, if you have access to three different Cloudflare accounts, you should restrict the generated API token to only the account on which you will be deploying a Worker. diff --git a/src/content/docs/workers/configuration/cron-triggers.mdx b/src/content/docs/workers/configuration/cron-triggers.mdx index 294ce3e8915d3e..0a02d5bef2f4ee 100644 --- a/src/content/docs/workers/configuration/cron-triggers.mdx +++ b/src/content/docs/workers/configuration/cron-triggers.mdx @@ -5,7 +5,7 @@ head: [] description: Enable your Worker to be executed on a schedule. 
--- -import { Render, WranglerConfig, TabItem, Tabs } from "~/components"; +import { Render, WranglerConfig, TabItem, Tabs, DashButton } from "~/components"; ## Background @@ -113,9 +113,11 @@ crons = ["0 * * * *"] To add Cron Triggers in the Cloudflare dashboard: -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. In Account Home, select **Workers & Pages**. -3. In **Overview**, select your Worker > **Settings** > **Triggers** > **Cron Triggers**. +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + +2. In **Overview**, select your Worker > **Settings** > **Triggers** > **Cron Triggers**. ## Supported cron expressions @@ -195,11 +197,13 @@ curl "http://localhost:8787/cdn-cgi/handler/scheduled?cron=*+*+*+*+*&time=174585 To view the execution history of Cron Triggers, view **Cron Events**: -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. In Account Home, go to **Workers & Pages**. -3. In **Overview**, select your **Worker**. -4. Select **Settings**. -5. Under **Trigger Events**, select **View events**. +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + +2. In **Overview**, select your **Worker**. +3. Select **Settings**. +4. Under **Trigger Events**, select **View events**. Cron Events stores the 100 most recent invocations of the Cron scheduled event. [Workers Logs](/workers/observability/logs/workers-logs) also records invocation logs for the Cron Trigger with a longer retention period and a filter & query interface. If you are interested in an API to access Cron Events, use Cloudflare's [GraphQL Analytics API](/analytics/graphql-api). @@ -217,8 +221,11 @@ Refer to [Metrics and Analytics](/workers/observability/metrics-and-analytics/) To delete a Cron Trigger on a deployed Worker via the dashboard: -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. Go to **Workers & Pages**, and select your Worker. +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + +2. Select your Worker. 3. Go to **Triggers** > select the three dot icon next to the Cron Trigger you want to remove > **Delete**. #### Via the [Wrangler configuration file](/workers/wrangler/configuration/) @@ -252,12 +259,14 @@ Renewable energy can be purchased in a number of ways, including through on-site Green Compute can be configured at the account level: -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. In Account Home, select **Workers & Pages**. -3. In the **Account details** section, find **Compute Setting**. -4. Select **Change**. -5. Select **Green Compute**. -6. Select **Confirm**. +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + +2. In the **Account details** section, find **Compute Setting**. +3. Select **Change**. +4. Select **Green Compute**. +5. Select **Confirm**. ## Related resources diff --git a/src/content/docs/workers/configuration/environment-variables.mdx b/src/content/docs/workers/configuration/environment-variables.mdx index 1a2ac65a52a9a3..62af6defcb250e 100644 --- a/src/content/docs/workers/configuration/environment-variables.mdx +++ b/src/content/docs/workers/configuration/environment-variables.mdx @@ -5,7 +5,7 @@ head: [] description: You can add environment variables, which are a type of binding, to attach text strings or JSON values to your Worker. 
--- -import { Render, TabItem, Tabs, WranglerConfig } from "~/components"; +import { Render, TabItem, Tabs, WranglerConfig, DashButton } from "~/components"; ## Background @@ -79,15 +79,16 @@ Learn about [environments in Wrangler](/workers/wrangler/environments). ## Add environment variables via the dashboard To add environment variables via the dashboard: +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. -1. Log in to [Cloudflare dashboard](https://dash.cloudflare.com/) and select your account. -2. Select **Workers & Pages**. -3. In **Overview**, select your Worker. -4. Select **Settings**. -5. Under **Variables and Secrets**, select **Add**. -6. Select a **Type**, input a **Variable name**, and input its **Value**. This variable will be made available to your Worker. -7. (Optional) To add multiple environment variables, select **Add variable**. -8. Select **Deploy** to implement your changes. + + +2. In **Overview**, select your Worker. +3. Select **Settings**. +4. Under **Variables and Secrets**, select **Add**. +5. Select a **Type**, input a **Variable name**, and input its **Value**. This variable will be made available to your Worker. +6. (Optional) To add multiple environment variables, select **Add variable**. +7. Select **Deploy** to implement your changes. :::caution[Plaintext strings and secrets] Select the **Secret** type if your environment variable is a [secret](/workers/configuration/secrets/). Alternatively, consider [Cloudflare Secrets Store](/secrets-store/), for account-level secrets. diff --git a/src/content/docs/workers/configuration/routing/custom-domains.mdx b/src/content/docs/workers/configuration/routing/custom-domains.mdx index 1c091634f3bce9..8fb4b295a78f7d 100644 --- a/src/content/docs/workers/configuration/routing/custom-domains.mdx +++ b/src/content/docs/workers/configuration/routing/custom-domains.mdx @@ -4,7 +4,7 @@ title: Custom Domains --- -import { WranglerConfig } from "~/components"; +import { WranglerConfig, DashButton } from "~/components"; ## Background @@ -27,7 +27,7 @@ To add a Custom Domain, you must have: 1. An [active Cloudflare zone](/dns/zone-setups/). 2. A Worker to invoke. -Custom Domains can be attached to your Worker via the [Cloudflare dashboard](/workers/configuration/routing/custom-domains/#set-up-a-custom-domain-in-the-dashboard), [Wrangler](/workers/configuration/routing/custom-domains/#set-up-a-custom-domain-in-your-wrangler-configuration-file) or the [API](/api/resources/workers/subresources/domains/methods/list/). +Custom Domains can be attached to your Worker via the Cloudflare dashboard, [Wrangler](/workers/configuration/routing/custom-domains/#set-up-a-custom-domain-in-your-wrangler-configuration-file) or the [API](/api/resources/workers/subresources/domains/methods/list/). :::caution @@ -41,8 +41,11 @@ You cannot create a Custom Domain on a hostname with an existing CNAME DNS recor To set up a Custom Domain in the dashboard: -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. Select **Workers & Pages** and in **Overview**, select your Worker. +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + +2. In **Overview**, select your Worker. 3. Go to **Settings** > **Domains & Routes** > **Add** > **Custom Domain**. 4. Enter the domain you want to configure for your Worker. 5. Select **Add Custom Domain**. 
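Once the Custom Domain is active, requests to it invoke the Worker's `fetch` handler like any other request. If you attach several Custom Domains to the same Worker, one option is to branch on the incoming hostname. The following is a minimal sketch; the hostnames are illustrative, not part of the steps above.

```ts
export default {
	async fetch(request: Request): Promise<Response> {
		// The hostname reflects whichever Custom Domain received the request.
		const { hostname, pathname } = new URL(request.url);

		// Illustrative hostnames; use the Custom Domains you attached to this Worker.
		if (hostname === "api.example.com") {
			return Response.json({ route: "api", path: pathname });
		}

		return new Response(`Hello from ${hostname}`);
	},
} satisfies ExportedHandler;
```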
@@ -149,21 +152,28 @@ If you are currently invoking a Worker using a [route](/workers/configuration/ro ### Migrate from Routes via the dashboard To migrate the route `example.com/*`: +1. In the Cloudflare dashboard, go to the **Account home** page. + + -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. Go to **DNS** and delete the CNAME record for `example.com`. -3. Go to **Account Home** > **Workers & Pages**. -4. In **Overview**, select your Worker > **Settings** > **Domains & Routes**. -5. Select **Add** > **Custom domain** and add `example.com`. -6. Delete the route `example.com/*` located in your Worker > **Settings** > **Domains & Routes**. +2. Select your domain. +3. Go to **DNS** and delete the CNAME record for `example.com`. +4. Go to **Account Home** > **Workers & Pages**. +5. In **Overview**, select your Worker > **Settings** > **Domains & Routes**. +6. Select **Add** > **Custom domain** and add `example.com`. +7. Delete the route `example.com/*` located in your Worker > **Settings** > **Domains & Routes**. ### Migrate from Routes via Wrangler To migrate the route `example.com/*` in your [Wrangler configuration file](/workers/wrangler/configuration/): -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. Go to **DNS** and delete the CNAME record for `example.com`. -3. Add the following to your Wrangler file: +1. In the Cloudflare dashboard, go to the **Account home** page. + + + +2. Select your domain. +3. Go to **DNS** and delete the CNAME record for `example.com`. +4. Add the following to your Wrangler file: diff --git a/src/content/docs/workers/configuration/routing/routes.mdx b/src/content/docs/workers/configuration/routing/routes.mdx index 1fec3a061f6bb5..dee5a1d8b7a4de 100644 --- a/src/content/docs/workers/configuration/routing/routes.mdx +++ b/src/content/docs/workers/configuration/routing/routes.mdx @@ -3,7 +3,7 @@ pcx_content_type: concept title: Routes --- -import { WranglerConfig } from "~/components"; +import { WranglerConfig, DashButton } from "~/components"; ## Background @@ -42,9 +42,11 @@ Routes can also be created via the API. Refer to the [Workers Routes API documen Before you set up a route, make sure you have a DNS record set up for the [domain](/dns/manage-dns-records/how-to/create-zone-apex/) or [subdomain](/dns/manage-dns-records/how-to/create-subdomain/) you would like to route to. To set up a route in the dashboard: +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. Go to **Workers & Pages** and in **Overview**, select your Worker. + + +2. In **Overview**, select your Worker. 3. Go to **Settings** > **Domains & Routes** > **Add** > **Route**. 4. Select the zone and enter the route pattern. 5. Select **Add route**. @@ -71,7 +73,7 @@ zone_id = "" -Add the `zone_name` or `zone_id` option after each route. The `zone_name` and `zone_id` options are interchangeable. If using `zone_id`, find your zone ID by logging in to the [Cloudflare dashboard](https://dash.cloudflare.com) > select your account > select your website > find the **Zone ID** in the lefthand side of **Overview**. +Add the `zone_name` or `zone_id` option after each route. The `zone_name` and `zone_id` options are interchangeable. 
If using `zone_id`, find your zone ID by logging in to the Cloudflare dashboard > select your account > select your website > find the **Zone ID** in the left-hand side of **Overview**. To add multiple routes: diff --git a/src/content/docs/workers/configuration/versions-and-deployments/gradual-deployments.mdx b/src/content/docs/workers/configuration/versions-and-deployments/gradual-deployments.mdx index 986ca63cbcc0f7..32470ad93ff911 100644 --- a/src/content/docs/workers/configuration/versions-and-deployments/gradual-deployments.mdx +++ b/src/content/docs/workers/configuration/versions-and-deployments/gradual-deployments.mdx @@ -5,7 +5,7 @@ head: [] description: Incrementally deploy code changes to your Workers with gradual deployments. --- -import { Example } from "~/components"; +import { Example, DashButton } from "~/components"; Gradual Deployments give you the ability to incrementally deploy new [versions](/workers/configuration/versions-and-deployments/#versions) of Workers by splitting traffic across versions. @@ -19,7 +19,7 @@ Using gradual deployments, you can: ## Use gradual deployments -The following section guides you through an example usage of gradual deployments. You will choose to use either [Wrangler](/workers/configuration/versions-and-deployments/gradual-deployments/#via-wrangler) or the [Cloudflare dashboard](/workers/configuration/versions-and-deployments/gradual-deployments/#via-the-cloudflare-dashboard) to: +The following section guides you through an example usage of gradual deployments. You will choose to use either [Wrangler](/workers/configuration/versions-and-deployments/gradual-deployments/#via-wrangler) or the Cloudflare dashboard to: - Create a new Worker. - Publish a new version of that Worker without deploying it. @@ -88,9 +88,11 @@ npx wrangler versions deploy ### Via the Cloudflare dashboard -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/?to=/:account/workers) and select your account. -2. Go to **Workers & Pages**. -3. Select **Create application** > **Hello World** template > deploy your Worker. +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + +2. Select **Create application** > **Hello World** template > deploy your Worker. 4. Once the Worker is deployed, go to the online code editor through **Edit code**. Edit the Worker code (change the `Response` content) and upload the Worker. 5. To save changes, select the **down arrow** next to **Deploy** > **Save**. This will create a new version of your Worker. 6. Create a new deployment that splits traffic between the two versions created in step 3 and 5 by going to **Deployments** and selecting **Deploy Version**. @@ -157,7 +159,7 @@ curl -s https://example.com -H 'Cloudflare-Workers-Version-Overrides: my-worker- The dictionary can contain multiple key-value pairs. Each key indicates the name of the Worker the override should be applied to. The value indicates the version ID that should be used and must be a [String](https://www.rfc-editor.org/rfc/rfc8941#name-strings). -A version override will only be applied if the specified version is in the current deployment. The versions in the current deployment can be found using the [`wrangler deployments list`](/workers/wrangler/commands/#list-5) command or on the [Workers Dashboard](https://dash.cloudflare.com/?to=/:account/workers) under Worker > Deployments > Active Deployment. +A version override will only be applied if the specified version is in the current deployment. 
The versions in the current deployment can be found using the [`wrangler deployments list`](/workers/wrangler/commands/#list-5) command or on the **Workers & Pages** page of the Cloudflare dashboard > select your Worker > **Deployments** > **Active Deployment**. :::note[Verifying that the version override was applied] diff --git a/src/content/docs/workers/configuration/versions-and-deployments/index.mdx index 607ec00ce18063..bb0f74f1074599 100644 --- a/src/content/docs/workers/configuration/versions-and-deployments/index.mdx +++ b/src/content/docs/workers/configuration/versions-and-deployments/index.mdx @@ -4,6 +4,7 @@ title: Versions & Deployments head: [] description: Upload versions of Workers and create deployments to release new versions. --- +import { DashButton } from "~/components"; Versions track changes to your Worker. Deployments configure how those changes are deployed to your traffic. @@ -83,9 +84,11 @@ Wrangler allows you to view the 10 most recent versions and deployments. Refer t To view your deployments in the Cloudflare dashboard: -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/?to=/:account/workers) and select your account. -2. Go to **Workers & Pages**. -3. Select your Worker > **Deployments**. +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + +2. Select your Worker > **Deployments**. ## Limits diff --git a/src/content/docs/workers/examples/103-early-hints.mdx index e96cc6463f384d..497f7c111a2e84 100644 --- a/src/content/docs/workers/examples/103-early-hints.mdx +++ b/src/content/docs/workers/examples/103-early-hints.mdx @@ -13,6 +13,7 @@ sidebar: order: 1001 description: Allow a client to request static assets while waiting for the HTML response. --- +import { DashButton } from "~/components"; If you want to get started quickly, click on the button below. @@ -26,7 +27,10 @@ import { TabItem, Tabs } from "~/components"; To ensure Early Hints are enabled on your zone: -1. Log in to the [Cloudflare Dashboard](https://dash.cloudflare.com) and select your account and website. +1. In the Cloudflare dashboard, go to the **Account home** page and select your domain. + + + 2. Go to **Speed** > **Optimization** > **Content Optimization**. 3. Enable the **Early Hints** toggle to on. diff --git a/src/content/docs/workers/examples/images-workers.mdx index 3694bc3ea7ea81..5989b071ae97e2 100644 --- a/src/content/docs/workers/examples/images-workers.mdx +++ b/src/content/docs/workers/examples/images-workers.mdx @@ -13,6 +13,7 @@ sidebar: description: Set up custom domain for Images using a Worker or serve images using a prefix path and Cloudflare registered domain. --- +import { DashButton } from "~/components"; If you want to get started quickly, click on the button below. @@ -23,10 +24,11 @@ This creates a repository in your GitHub account and deploys the application to import { TabItem, Tabs } from "~/components"; To serve images from a custom domain: +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com). -2. Select your account > select **Workers & Pages**. -3. Select **Create application** > **Workers** > **Create Worker** and create your Worker. + + +2. Select **Create application** > **Workers** > **Create Worker** and create your Worker. 4.
In your Worker, select **Quick edit** and paste the following code. diff --git a/src/content/docs/workers/get-started/dashboard.mdx index 453c667064c3fe..87d7dc32a35dc3 100644 --- a/src/content/docs/workers/get-started/dashboard.mdx +++ b/src/content/docs/workers/get-started/dashboard.mdx @@ -8,9 +8,9 @@ head: content: Get started - Dashboard --- -import { Render } from "~/components"; +import { Render, DashButton } from "~/components"; -Follow this guide to create a Workers application using [the Cloudflare dashboard](https://dash.cloudflare.com). +Follow this guide to create a Workers application using the Cloudflare dashboard. @@ -22,13 +22,15 @@ Follow this guide to create a Workers application using [the Cloudflare dashboar To get started with a new Workers application: -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. Go to the **Workers & Pages** section of the dashboard. -2. Select [Create](https://dash.cloudflare.com/?to=/:account/workers-and-pages/create). From here, you can: - * You can select from the gallery of production-ready templates +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + +2. Select **Create application**. From here, you can: + * Select from the gallery of production-ready templates * Import an existing Git repository on your own account * Let Cloudflare clone and bootstrap a public repository containing a Workers application. -3. Once you've connected to your chosen [Git provider](/workers/ci-cd/builds/git-integration/github-integration/), configure your project and click `Deploy`. +3. Once you have connected to your chosen [Git provider](/workers/ci-cd/builds/git-integration/github-integration/), configure your project and select **Deploy**. 4. Cloudflare will kick off a new build and deployment. Once deployed, preview your Worker at its provided `workers.dev` subdomain. ## Continue development diff --git a/src/content/docs/workers/observability/logs/real-time-logs.mdx index a41e7b60968e13..f60c4050d6bb8b 100644 --- a/src/content/docs/workers/observability/logs/real-time-logs.mdx +++ b/src/content/docs/workers/observability/logs/real-time-logs.mdx @@ -8,7 +8,7 @@ sidebar: order: 3 --- -import { TabItem, Tabs, Steps } from "~/components"; +import { TabItem, Tabs, DashButton } from "~/components"; With Real-time logs, access all your log events in near real-time for log events happening globally. Real-time logs is helpful for immediate feedback, such as the status of a new deployment. @@ -23,14 +23,13 @@ Real-time logs are not available for zones on the [Cloudflare China Network](/ch ## View logs from the dashboard To view real-time logs associated with any deployed Worker using the Cloudflare dashboard: +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. - -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. In Account Home, go to **Workers & Pages**. -3. In **Overview**, select your **Worker**. -4. Select **Logs**. -5. In the right-hand navigation bar, select **Live**. - + + +2. In **Overview**, select your **Worker**. +3. Select **Logs**. +4. In the right-hand navigation bar, select **Live**.
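The `wrangler tail` section that follows is unchanged by this diff; for quick reference, a typical invocation of the CLI equivalent of the **Live** view looks roughly like this (the Worker name is illustrative):

```bash
# Stream live logs for a deployed Worker; press Ctrl+C to stop
npx wrangler tail my-worker --format pretty
```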
## View logs using `wrangler tail` diff --git a/src/content/docs/workers/platform/pricing.mdx index 4db79c4caedebb..559a34610024d0 100644 --- a/src/content/docs/workers/platform/pricing.mdx +++ b/src/content/docs/workers/platform/pricing.mdx @@ -7,7 +7,7 @@ head: [] description: Workers plans and pricing information. --- -import { GlossaryTooltip, Render } from "~/components"; +import { GlossaryTooltip, Render, DashButton } from "~/components"; By default, users have access to the Workers Free plan. The Workers Free plan includes limited usage of Workers, Pages Functions, Workers KV and Hyperdrive. Read more about the [Free plan limits](/workers/platform/limits/#worker-limits). @@ -113,15 +113,18 @@ Changing the usage model only affects billable usage, and has no technical impli To change your default account-wide usage model: -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/?to=/:account/workers-and-pages) and select your account. -2. In Account Home, select **Workers & Pages**. -3. Find **Usage Model** on the right-side menu > **Change**. +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + +2. Find **Usage Model** in the right-side menu > **Change**. Usage models may be changed at the individual Worker level: +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/?to=/:account/workers/services/view/:worker/production/settings) and select your account. -2. In Account Home, select **Workers & Pages**. -3. In **Overview**, select your Worker > **Settings** > **Usage Model**. +2. In **Overview**, select your Worker > **Settings** > **Usage Model**. Existing Workers will not be impacted when changing the default usage model. You may change the usage model for individual Workers without affecting your account-wide default usage model. diff --git a/src/content/docs/workers/tutorials/automated-analytics-reporting/index.mdx index 0cb75dad87e606..0843677add8c37 100644 --- a/src/content/docs/workers/tutorials/automated-analytics-reporting/index.mdx +++ b/src/content/docs/workers/tutorials/automated-analytics-reporting/index.mdx @@ -21,6 +21,7 @@ import { TabItem, Tabs, WranglerConfig, + DashButton, } from "~/components"; In this tutorial, you will create a [Cloudflare Worker](https://workers.cloudflare.com/) that fetches analytics data about your account from Cloudflare's [GraphQL Analytics API](https://developers.cloudflare.com/analytics/graphql-api/). You will be able to view the account analytics data in your browser and receive a scheduled email report. @@ -445,9 +446,11 @@ Once you put the secrets, preview your analytics data at `account-analytics. + +2. Select the `account-analytics` Worker. 3. Go to **Settings** > **Domains & Routes**.
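The tutorial hunk above assumes the secrets have already been uploaded ("Once you put the secrets"). With Wrangler, that step looks roughly like the following when run from the project directory; the secret name shown is illustrative rather than the tutorial's exact name:

```bash
# Upload a secret to the Worker; Wrangler prompts for the value interactively
npx wrangler secret put CLOUDFLARE_API_TOKEN
```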
diff --git a/src/content/docs/workers/tutorials/deploy-a-realtime-chat-app/index.mdx index 83d6cc231cdfad..82331cc05eb47d 100644 --- a/src/content/docs/workers/tutorials/deploy-a-realtime-chat-app/index.mdx +++ b/src/content/docs/workers/tutorials/deploy-a-realtime-chat-app/index.mdx @@ -9,7 +9,7 @@ tags: - JavaScript --- -import { Render, WranglerConfig } from "~/components"; +import { Render, WranglerConfig, DashButton } from "~/components"; In this tutorial, you will deploy a serverless, real-time chat application that runs using [Durable Objects](/durable-objects/). @@ -75,10 +75,14 @@ custom_domain = true To test your live application: -1. Open your `edge-chat-demo..workers.dev` subdomain. Your subdomain can be found in the [Cloudflare dashboard](https://dash.cloudflare.com) > **Workers & Pages** > your Worker > **Triggers** > **Routes** > select the `edge-chat-demo..workers.dev` route. -2. Enter a name in the **your name** field. -3. Choose whether to enter a public room or create a private room. -4. Send the link to other participants. You will be able to view room participants on the right side of the screen. +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + +2. Select your Worker > **Triggers** > **Routes** > select the `edge-chat-demo..workers.dev` route. +3. Enter a name in the **your name** field. +4. Choose whether to enter a public room or create a private room. +5. Send the link to other participants. You will be able to view room participants on the right side of the screen. ## Uninstall your application @@ -108,11 +112,12 @@ deleted_classes = ["ChatRoom", "RateLimiter"] Then run `npx wrangler deploy`. To delete your Worker: +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. In Account Home, select **Workers & Pages**. -3. In **Overview**, select your Worker. -4. Select **Manage Service** > **Delete**. For complete instructions on set up and deletion, refer to the `README.md` in your cloned repository. +2. In **Overview**, select your Worker. +3. Select **Manage Service** > **Delete**. For complete instructions on setup and deletion, refer to the `README.md` in your cloned repository. By completing this tutorial, you have deployed a real-time chat application with Durable Objects and Cloudflare Workers. diff --git a/src/content/docs/workers/tutorials/using-prisma-postgres-with-workers/index.mdx index 1d0df6d1bd02d0..7ce0042da7b97f 100644 --- a/src/content/docs/workers/tutorials/using-prisma-postgres-with-workers/index.mdx +++ b/src/content/docs/workers/tutorials/using-prisma-postgres-with-workers/index.mdx @@ -10,7 +10,7 @@ tags: - PostgreSQL --- -import { PackageManagers } from "~/components"; +import { PackageManagers, DashButton } from "~/components"; [Prisma Postgres](https://www.prisma.io/postgres) is a managed, serverless PostgreSQL database. It supports features like connection pooling, caching, real-time subscriptions, and query optimization recommendations. @@ -220,7 +220,7 @@ npm run deploy The `wrangler` CLI will bundle and upload your application. -If you are not already logged in, the `wrangler` CLI will open a browser window prompting you to log in to the [Cloudflare dashboard](https://dash.cloudflare.com/).
+If you are not already logged in, the `wrangler` CLI will open a browser window prompting you to log in to the Cloudflare dashboard. :::note If you belong to multiple accounts, select the account where you want to deploy the project. diff --git a/src/content/docs/workers/wrangler/migration/v1-to-v2/wrangler-legacy/commands.mdx index b3bc19457cb8a5..f7ab8f2da856e2 100644 --- a/src/content/docs/workers/wrangler/migration/v1-to-v2/wrangler-legacy/commands.mdx +++ b/src/content/docs/workers/wrangler/migration/v1-to-v2/wrangler-legacy/commands.mdx @@ -9,7 +9,7 @@ head: noindex: true --- -import { Render, Type, MetaInfo, WranglerConfig } from "~/components"; +import { Render, Type, MetaInfo, WranglerConfig, DashButton } from "~/components"; @@ -107,7 +107,14 @@ wrangler logout This command only invalidates OAuth tokens acquired through the `wrangler login` command. However, it will try to delete the configuration file regardless of your authorization method. -If you wish to delete your API token, log in to the Cloudflare dashboard and go to **Overview** > **Get your API token** in the right side menu > select the three-dot menu on your Wrangler token and select **Delete** if you wish to delete your API token. +To delete your API token: + +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. + + + +2. In **Overview**, select **Get your API token** in the right-side menu. +3. Select the three-dot menu on your Wrangler token and select **Delete**. --- @@ -166,7 +173,7 @@ After you have registered a subdomain, add `workers_dev` to your Wrangler file. To publish to your own domain, specify these three fields in your Wrangler file. - `zone_id` string - - The Cloudflare zone ID, for example, `zone_id = "b6558acaf2b4cad1f2b51c5236a6b972"`, which can be found in the [Cloudflare dashboard](https://dash.cloudflare.com). + - The Cloudflare zone ID, for example, `zone_id = "b6558acaf2b4cad1f2b51c5236a6b972"`, which can be found in the Cloudflare dashboard. - `route` string - The route you would like to publish to, for example, `route = "example.com/my-worker/*"`. diff --git a/src/content/partials/durable-objects/do-faq-pricing.mdx index a3204638e7ffe4..be4a1b72770778 100644 --- a/src/content/partials/durable-objects/do-faq-pricing.mdx +++ b/src/content/partials/durable-objects/do-faq-pricing.mdx @@ -9,6 +9,7 @@ A Durable Object incurs duration charges as long as the JavaScript object has to Once an object has been evicted from memory, the next time it is needed, it will be recreated (calling the constructor again). There are several factors that contribute in keeping the Durable Object in memory and keeping it from hibernating or being inactive. + Find more information in [Lifecycle of a Durable Object](/durable-objects/concepts/durable-object-lifecycle/). ### Does an empty table / SQLite database contribute to my storage?
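The pricing footnote in the next hunk recommends the WebSocket Hibernation API so a Durable Object can be evicted from memory while WebSockets stay connected. A minimal sketch of that pattern, with an illustrative class name:

```js
export class ChatRoom {
	constructor(ctx, env) {
		this.ctx = ctx;
	}

	async fetch(request) {
		const pair = new WebSocketPair();
		// acceptWebSocket() uses the Hibernation API, so the object is not
		// kept in memory (and billed for duration) just to hold the socket open.
		this.ctx.acceptWebSocket(pair[1]);
		return new Response(null, { status: 101, webSocket: pair[0] });
	}

	async webSocketMessage(ws, message) {
		ws.send(`echo: ${message}`);
	}
}
```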
diff --git a/src/content/partials/durable-objects/durable-objects-pricing.mdx b/src/content/partials/durable-objects/durable-objects-pricing.mdx index 2e080b82a45873..587b797160c165 100644 --- a/src/content/partials/durable-objects/durable-objects-pricing.mdx +++ b/src/content/partials/durable-objects/durable-objects-pricing.mdx @@ -32,7 +32,7 @@ await durableObjectStub.cat(); // billed as a request 3 Application level auto-response messages handled by [`state.setWebSocketAutoResponse()`](/durable-objects/best-practices/websockets/) will not incur additional wall-clock time, and so they will not be charged. -4 Duration is billed in wall-clock time as long as the Object is active, but is shared across all requests active on an Object at once. Calling `accept()` on a WebSocket in an Object will incur duration charges for the entire time the WebSocket is connected. It is recommended to use the WebSocket Hibernation API to avoid incurring duration charges once all event handlers finish running. Note that the Durable Object will remain active for 10 seconds after the last client disconnects. For a complete explanation, refer to [When does a Durable Object incur duration charges?](/durable-objects/platform/pricing/#when-does-a-durable-object-incur-duration-charges). +4 Duration is billed in wall-clock time as long as the Object is active, but is shared across all requests active on an Object at once. Calling `accept()` on a WebSocket in an Object will incur duration charges for the entire time the WebSocket is connected. It is recommended to use the WebSocket Hibernation API to avoid incurring duration charges once all event handlers finish running. For a complete explanation, refer to [When does a Durable Object incur duration charges?](/durable-objects/platform/pricing/#when-does-a-durable-object-incur-duration-charges). 5 Duration billing charges for the 128 MB of memory your Durable Object is allocated, regardless of actual usage. If your account creates many instances of a single Durable Object class, Durable Objects may run in the same isolate on the same physical machine and share the 128 MB of memory. These Durable Objects are still billed as if they are allocated a full 128 MB of memory. diff --git a/src/content/partials/hyperdrive/planetscale-partial.mdx b/src/content/partials/hyperdrive/planetscale-partial.mdx index bed84899102c7e..07ad16109834f9 100644 --- a/src/content/partials/hyperdrive/planetscale-partial.mdx +++ b/src/content/partials/hyperdrive/planetscale-partial.mdx @@ -8,7 +8,7 @@ import { Render } from "~/components"; You can connect Hyperdrive to any existing PlanetScale MySQL-compatible database by creating a new user and fetching your database connection string. -### Planetscale Dashboard +### PlanetScale Dashboard 1. Go to the [**PlanetScale dashboard**](https://app.planetscale.com/) and select the database you wish to connect to. 2. Click **Connect**. Enter `hyperdrive-user` as the password name (or your preferred name) and configure the permissions as desired. Select **Create password**. Note the username and password as they will not be displayed again. 
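The next hunk notes that, with the host, database name, username, and password, you can create a Hyperdrive configuration. Via Wrangler that step looks roughly like this; the configuration name and placeholders are illustrative:

```bash
# Create a Hyperdrive configuration pointing at the PlanetScale database
npx wrangler hyperdrive create my-planetscale-db \
  --connection-string="mysql://<user>:<password>@<host>:3306/<database>"
```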
@@ -22,7 +22,7 @@ With the host, database name, username and password, you can now create a Hyperd :::note -When connecting to a Planetscale database with Hyperdrive, you should use a driver like [node-postgres (pg)](/hyperdrive/examples/connect-to-postgres/postgres-drivers-and-libraries/node-postgres/) or [Postgres.js](/hyperdrive/examples/connect-to-postgres/postgres-drivers-and-libraries/postgres-js/) to connect directly to the underlying database instead of the [Planetscale serverless driver](https://planetscale.com/docs/tutorials/planetscale-serverless-driver). Hyperdrive is optimized for database access for Workers and will perform global connection pooling and fast query routing by connecting directly to your database. +When connecting to a PlanetScale database with Hyperdrive, you should use a MySQL driver like [mysql2](https://github.com/sidorares/node-mysql2) to connect directly to the underlying MySQL-compatible database instead of the [PlanetScale serverless driver](https://planetscale.com/docs/tutorials/planetscale-serverless-driver). Hyperdrive is optimized for database access for Workers and will perform global connection pooling and fast query routing by connecting directly to your database. ::: diff --git a/src/content/partials/networking-services/cni-product-use-cases.mdx index b8c397ca6b9e32..b7851f060f07e1 100644 --- a/src/content/partials/networking-services/cni-product-use-cases.mdx +++ b/src/content/partials/networking-services/cni-product-use-cases.mdx @@ -10,7 +10,7 @@ CNI provides a private point-to-point IP connection with Cloudflare. There are t | **Magic Transit Direct Server Return (DSR)**<br/>
DDoS protection for all ingress traffic from the Internet to your public network. Send egress traffic via your ISP. | Supported with a GRE tunnel established over the interconnect circuit. | Supported with or without a GRE tunnel established over the interconnect circuit. | | **Magic Transit with Egress**
DDoS protection for all ingress traffic from the Internet to your public network. Send egress traffic via Cloudflare. | Supported with a GRE tunnel established over the interconnect circuit. | Supported with a GRE tunnel established over the interconnect circuit. | | **Magic WAN and Zero Trust**
Build a secure, private network backbone connecting your Zero Trust users and applications with all your sites, data centers, and clouds. | Supported with a GRE tunnel established over the interconnect circuit. | Supported with or without a GRE tunnel established over the interconnect circuit. | -| **Peering**
Exchange public routes with a single Cloudflare PoP (Point of Presence). | Supported. All customers connecting with the edge data center will exchange public routes at that PoP with AS13335. Connectivity is established at each individual PoP. Routes for other edge locations in Cloudflare's network may not be available. Routes for customer-advertised prefixes will be available only in the connected PoP. | | -| **Application Security and Performance**
Improve the performance and security of your web applications | **Supported via peering**: Customers can use Argo Smart Routing to direct origin traffic via the edge peering connection when it is determined to be the lowest latency option. Customers must maintain a direct Internet connection which will always be used for a portion of traffic and during failure scenarios.
**Supported Via Magic Transit**: Customers may configure any product with an origin server IP address that is protected by Magic Transit. Magic Transit will direct this traffic via the overlay and customer can control interconnect next-hops using the Magic networking routing table. | | +| **Peering**
Exchange public routes with a single Cloudflare PoP (Point of Presence). | Supported. All customers connecting with the edge data center will exchange public routes at that PoP with AS13335. Connectivity is established at each individual PoP. Routes for other edge locations in Cloudflare's network may not be available. Routes for customer-advertised prefixes will be available only in the connected PoP. | Supported. All customers connecting with the edge data center will exchange public routes at that PoP with AS13335. Connectivity is established at each individual PoP. Routes for other edge locations in Cloudflare's network may not be available. Routes for customer-advertised prefixes will be available only in the connected PoP. | +| **Application Security and Performance**
Improve the performance and security of your web applications | **Supported via peering**: Customers can use Argo Smart Routing to direct origin traffic via the edge peering connection when it is determined to be the lowest latency option. Customers must maintain a direct Internet connection which will always be used for a portion of traffic and during failure scenarios.
**Supported Via Magic Transit**: Customers may configure any product with an origin server IP address that is protected by Magic Transit. Magic Transit will direct this traffic via the overlay and customer can control interconnect next-hops using the Magic networking routing table. | **Supported via peering**: Customers can use Argo Smart Routing to direct origin traffic via the edge peering connection when it is determined to be the lowest latency option. Customers must maintain a direct Internet connection which will always be used for a portion of traffic and during failure scenarios.
**Supported Via Magic Transit**: Customers may configure any product with an origin server IP address that is protected by Magic Transit. Magic Transit will direct this traffic via the overlay and customer can control interconnect next-hops using the Magic networking routing table. | For more details refer to the [prerequisites section](/network-interconnect/get-started/#prerequisites). \ No newline at end of file diff --git a/src/content/partials/workers/get-started-dash.mdx b/src/content/partials/workers/get-started-dash.mdx index 18e9fa067e279d..63f3c464720f5e 100644 --- a/src/content/partials/workers/get-started-dash.mdx +++ b/src/content/partials/workers/get-started-dash.mdx @@ -2,9 +2,12 @@ {} --- +import { DashButton } from "~/components"; To create your first Worker using the Cloudflare dashboard: +1. In the Cloudflare dashboard, go to the **Workers & Pages** page. -1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and select your account. -2. Select **Workers & Pages** > **Create application**. + + +2. Select **Create application**. 3. Select **Create Worker** > **Deploy**. diff --git a/src/content/plans/index.json b/src/content/plans/index.json index 70fa1212f38f2c..b46f9ed1f6b839 100644 --- a/src/content/plans/index.json +++ b/src/content/plans/index.json @@ -176,9 +176,9 @@ "ent": "30 seconds" }, "minimum_cache_rules": { - "title": "Minimum Browser Cache TTL (Cache Rules)", - "free": "2 hours", - "pro": "1 hour", + "title": "Minimum Browser Cache TTL", + "free": "1 second", + "pro": "1 second", "biz": "1 second", "ent": "1 second" },
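For context on the `get-started-dash` partial above: the **Create Worker** > **Deploy** flow deploys the dashboard's default Hello World template, which behaves roughly like the Worker below (a sketch, not the exact template source):

```js
export default {
	async fetch(request, env, ctx) {
		// The default template returns a simple greeting for every request
		return new Response("Hello World!");
	},
};
```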