From 2f89eea32190c7962cbeb52568c621f5b944e2ca Mon Sep 17 00:00:00 2001 From: Jordan Stephens Date: Wed, 2 Jul 2025 18:55:19 -0700 Subject: [PATCH 1/2] clone samples before running prebuild --- app/get_knowledge_base.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/app/get_knowledge_base.py b/app/get_knowledge_base.py index 366dff3..8569127 100644 --- a/app/get_knowledge_base.py +++ b/app/get_knowledge_base.py @@ -35,7 +35,8 @@ def setup_repositories(): # Define repositories and their URLs repos = { "defang-docs": "https://github.com/DefangLabs/defang-docs.git", - "defang": "https://github.com/DefangLabs/defang.git" + "defang": "https://github.com/DefangLabs/defang.git", + "samples": "https://github.com/DefangLabs/samples.git" } # Change to the temporary directory From 62759efd7e20d6d57b6e37255069c2873218081c Mon Sep 17 00:00:00 2001 From: Jordan Stephens Date: Wed, 2 Jul 2025 18:56:21 -0700 Subject: [PATCH 2/2] update kb --- app/data/knowledge_base.json | 790 ++++++++++++++++++++++++++--------- 1 file changed, 587 insertions(+), 203 deletions(-) diff --git a/app/data/knowledge_base.json b/app/data/knowledge_base.json index 553e2b5..e23c706 100644 --- a/app/data/knowledge_base.json +++ b/app/data/knowledge_base.json @@ -8,13 +8,13 @@ { "id": 2, "about": " ", - "text": "
\"Defang

Defang Documentation

Defang is a radically simpler way for developers to develop, deploy, and debug their cloud applications. Build cloud applications in any language, deploy to your cloud account with a single command, and iterate quickly with AI-assisted tooling.

Getting Started

", + "text": "
\"Defang

Defang Documentation

Defang lets you take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes. Build cloud applications in any language and stack, deploy to your account on your favorite cloud with a single command, and iterate quickly with AI-assisted tooling.

Getting Started

", "path": "/docs/intro/intro" }, { "id": 3, - "about": "What is Defang?, A Tool to Develop, Deploy, Debug, Develop, Deploy, Debug", - "text": " Defang is a radically simpler way for developers to develop, deploy, and debug their cloud applications. Defang enables you to easily author cloud applications in any language, build and deploy to the cloud with a single command, and iterate quickly with AI-assisted tooling. The [Defang CLI (command line interface)](/docs/getting-started#install-the-defang-cli.md) includes an AI-driven agent that translates natural language prompts to [generate an outline](/docs/tutorials/generate-new-code-using-ai) for your project that you can then refine. Defang can automatically build and deploy your project with a single command. - If you’re new to Defang, you can try deploying to [Defang Playground](/docs/concepts/defang-playground.md), a hosted environment to learn to use Defang with non-production workloads. - Once you’re ready, you can [deploy](/docs/tutorials/deploy-to-your-cloud) a project to your own cloud account - we call this [Defang BYOC (Bring-your-Own-Cloud)](/docs/concepts/defang-byoc.md). We offer support for the following cloud providers: * [Amazon Web Services (AWS)](/docs/tutorials/deploy-to-aws) * [DigitalOcean](/docs/tutorials/deploy-to-digitalocean) * [Google Cloud Platform (GCP)](/docs/tutorials/deploy-to-gcp) - To support stateful workloads, we've got managed storage options such as [Managed Postgres](/docs/concepts/managed-storage/managed-postgres) and [Managed Redis](/docs/concepts/managed-storage/managed-redis). - If you want, you can also [bring your own domain name](/docs/tutorials/use-your-own-domain-name) for your deployment. Defang takes care of all the heavy lifting such as configuring networking, security, [observability](/docs/concepts/observability.md) and all the other details that usually slow down the average cloud developer. 
It also allows you to easily [publish updates](/docs/concepts/deployments.md#deploying-updates) to your deployed application with zero downtime. Once you've deployed, you can use our AI agent to help [debug](/docs/concepts/debug) your cloud applications, using your service logs and project files to help you identify and resolve issues.", + "about": "What is Defang?, Develop Anything, Deploy Anywhere., Get Started Quickly, Deploy with a Single Command, Debug", + "text": " Defang lets you take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes. Any App, Any Stack, Any Cloud. The [Defang CLI (command line interface)](/docs/getting-started#install-the-defang-cli.md) includes an AI agent that translates natural language prompts to [generate an outline](/docs/tutorials/generate-new-code-using-ai) for your project that you can then refine. Or choose from our [library of over 50 samples](https://defang.io/samples/) covering all major frameworks and technologies. Defang can automatically build and deploy your project with a single command. - If you’re new to Defang, you can try deploying to [Defang Playground](/docs/concepts/defang-playground.md), a hosted environment to learn to use Defang with non-production workloads. - Once you’re ready, you can [deploy](/docs/tutorials/deploy-to-your-cloud) a project to your own cloud account - we call this [Defang BYOC (Bring-your-Own-Cloud)](/docs/concepts/defang-byoc.md). We offer support for the following cloud providers: * [Amazon Web Services (AWS)](/docs/tutorials/deploy-to-aws) * [DigitalOcean](/docs/tutorials/deploy-to-digitalocean) * [Google Cloud Platform (GCP)](/docs/tutorials/deploy-to-gcp) - To support stateful workloads, we've got managed storage options such as [Managed Postgres](/docs/concepts/managed-storage/managed-postgres) and [Managed Redis](/docs/concepts/managed-storage/managed-redis). 
- If you want, you can also [bring your own domain name](/docs/tutorials/use-your-own-domain-name) for your deployment. Defang takes care of all the heavy lifting such as configuring networking, security, [observability](/docs/concepts/observability.md) and all the other details that usually slow down the average cloud developer. It also allows you to easily [publish updates](/docs/concepts/deployments.md#deploying-updates) to your deployed application with zero downtime. Once you've deployed, you can use our AI agent to help [debug](/docs/concepts/debug) your cloud applications, using your service logs and project files to help you identify and resolve issues.", "path": "/docs/intro/what-is-defang" }, { @@ -38,7 +38,7 @@ { "id": 7, "about": "How Defang Works, Bootstrapping, Orchestrating Deployments, Building Images, Service Provisioning", - "text": "Defang is a radically simpler way to develop, deploy, and debug applications in your favourite cloud. Defang abstracts away the complexity of cloud infrastructure, providing you with a streamlined experience. Defang works by provisioning a \"cd\" service and a small set of resources in your cloud account. These services enable Defang to orchestrate deployments for you in your cloud account from the Defang CLI. Here's how it works. The first time you deploy with Defang, a new `cd` service will be created in your cloud account. This service acts as an intermediary between you and your cloud provider. It will set up a grpc endpoint with which the Defang CLI can communicate. When the cli sends a request to trigger a deployment, for example, this service will orchestrate the build and deployment process—interfacing with the cloud APIs on your behalf. We will also create the necessary resources to support the defang system. This includes things like roles, a storage space, an image repository, certificates, etc. The specific resources created depend on the cloud provider. 
Our architecture and AWS implementation has passed a [\"well-architected\"](https://docs.aws.amazon.com/wellarchitected/latest/framework/welcome.html) review. We are in the process for obtaining similar qualifications with Digital Ocean and Google Cloud. You can learn more about the specifics by visiting our [provider docs](/docs/category/providers). :::info The `cd` service does not run all the time. It is only used when you deploy a new service or update an existing service. Once it has finished deploying your service, it will shut itself down. ::: ```mermaid flowchart TD subgraph workspace[\"Local Workspace\"] compose[compose.yaml] cli(\"Defang CLI\") end subgraph cloud[\"Cloud\"] sdk((\"SDK\")) cd(cd) kaniko(Kaniko) subgraph services[\" \"] service1(\"Service 1\") service2(\"Service 2\") service3(\"Service 3\") end end compose --> cli cli <--> cd cd --> kaniko cd --> sdk sdk --> services ``` The Defang `cd` service acts as an intermediary between you and your cloud provider. This service receives deployment requests from the Defang CLI. Once a request has been received, `cd` orchestrates the process of building application images from your source code, and then continues to provision the necessary resources to deploy your application. :::info The `defang` cli will upload your source code to a storage destination within your cloud. Your source code is never processed by Defang's servers. ::: When you deploy a new service, Defang will build a Docker image from your source code. This source code is uploaded by the `defang` cli to a storage destination in your cloud account. The Defang `cd` service will then retrieve it and determine if each of your service's images need to be rebuilt. If rebuilding is necessary, `cd` will start a new container for each build it needs to complete. When you deploy an update to an existing service, the Defang `cd` service will determine if rebuilding your service's images is necessary. 
For example, when deploying new source code, `cd` will request that a new image be built. When deploying an update which does not require a new image, one will not be built—for example, redeploying the same service with increased or decreased resource requirements. In this case, the same image can be deployed to newly provisioned cloud resources. Defang uses [Kaniko](https://github.com/GoogleContainerTools/kaniko) to build your images in a container in your cloud account. The resulting images will be stored in your cloud account's private container registry for future reference. After your images have been built, `cd` will provision the necessary resources and deploy these images as new services in your cloud account. Defang uses the cloud provider's SDK to create the necessary resources for your services. This may include creating new containers, setting up networking, and configuring any other resources your services needs, such as storage resources. When deploying changes to existing services, the `cd` service will determine the minimum set of changes necessary and add, remove, replace, or update services as necessary.", + "text": "Defang is an AI-assisted tool that lets you take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes. Defang abstracts away the complexity of cloud infrastructure, providing you with a streamlined experience. Defang works by provisioning a \"CD\" service and a small set of resources in your cloud account. These services enable Defang to orchestrate deployments for you in your cloud account from the `defang` CLI. Here's how it works. The first time you deploy with Defang, a new `CD` service will be created in your cloud account. This service acts as an intermediary between you and your cloud provider. It will set up a grpc endpoint with which the `defang` CLI can communicate. 
When the CLI sends a request to trigger a deployment, for example, this service will orchestrate the build and deployment process—interfacing with the cloud APIs on your behalf. We will also create the necessary resources to support the Defang system. This includes things like roles, a storage space, an image repository, certificates, etc. The specific resources created depend on the cloud provider. Our architecture and AWS implementation have passed a [\"well-architected\"](https://docs.aws.amazon.com/wellarchitected/latest/framework/welcome.html) review. We are in the process of obtaining similar qualifications with Digital Ocean and Google Cloud. You can learn more about the specifics by visiting our [provider docs](/docs/category/providers). :::info The `CD` service does not run all the time. It is only used when you deploy a new service or update an existing service. Once it has finished deploying your service, it will shut itself down. ::: ```mermaid flowchart TD subgraph workspace[\"Local Workspace\"] compose[compose.yaml] CLI(\"Defang CLI\") end subgraph cloud[\"Cloud\"] sdk((\"SDK\")) CD(CD) kaniko(Kaniko) subgraph services[\" \"] service1(\"Service 1\") service2(\"Service 2\") service3(\"Service 3\") end end compose --> CLI CLI <--> CD CD --> kaniko CD --> sdk sdk --> services ``` The Defang `CD` service acts as an intermediary between you and your cloud provider. This service receives deployment requests from the `defang` CLI. Once a request has been received, `CD` orchestrates the process of building application images from your source code, and then continues to provision the necessary resources to deploy your application. :::info The `defang` CLI will upload your source code to a storage destination within your cloud. Your source code is never processed by Defang's servers. ::: When you deploy a new service, Defang will build a Docker image from your source code. 
This source code is uploaded by the `defang` CLI to a storage destination in your cloud account. The Defang `CD` service will then retrieve it and determine if each of your service's images need to be rebuilt. If rebuilding is necessary, `CD` will start a new container for each build it needs to complete. When you deploy an update to an existing service, the Defang `CD` service will determine if rebuilding your service's images is necessary. For example, when deploying new source code, `CD` will request that a new image be built. When deploying an update which does not require a new image, one will not be built—for example, redeploying the same service with increased or decreased resource requirements. In this case, the same image can be deployed to newly provisioned cloud resources. Defang uses [Kaniko](https://github.com/GoogleContainerTools/kaniko) to build your images in a container in your cloud account. The resulting images will be stored in your cloud account's private container registry for future reference. After your images have been built, `CD` will provision the necessary resources and deploy these images as new services in your cloud account. Defang uses the cloud provider's SDK to create the necessary resources for your services. This may include creating new containers, setting up networking, and configuring any other resources your services needs, such as storage resources. When deploying changes to existing services, the `CD` service will determine the minimum set of changes necessary and add, remove, replace, or update services as necessary.", "path": "/docs/intro/how-it-works" }, { @@ -55,26 +55,26 @@ }, { "id": 10, - "about": "Frequently Asked Questions (FAQ), Deployment and Infrastructure, Which cloud/region is the app being deployed to?, Can I bring my own AWS or other cloud account?, On AWS, can I deploy to services such as EC2, EKS, or Lambda?, Can I access AWS storage services such as S3 or database services such as RDS? 
How?, Do you plan to support other clouds?, Deployment Process, Can I run production apps with Defang?, Does Defang support blue/green deployments?, Does Defang support rolling deployments?, Does Defang support auto-scaling?, Can I cancel a deployment once it has started?, Will deploying a new version of my app cause downtime?, Can I deploy multiple services at once?, Can I deploy a service that depends on another service?, Feature Comparisons, Is Defang a run-time platform?, What is the difference between Defang and platforms such as Vercel, fly.io, and Railway?, What is the difference between Defang and tools such as SST?, Troubleshooting, I'm having trouble running the binary on my Mac. What should I do?, I'm getting a warning/error. What does it mean?", + "about": "Frequently Asked Questions (FAQ), Deployment and Infrastructure, Which cloud/region is the app being deployed to?, Can I bring my own AWS or other cloud account?, On AWS, can I deploy to services such as EC2, EKS, or Lambda?, Can I access AWS storage services such as S3 or database services such as RDS? How?, Do you plan to support other clouds?, Deployment Process, Can I run production apps with Defang?, Does Defang support blue/green deployments?, Does Defang support rolling deployments?, Does Defang support auto-scaling?, Can I cancel a deployment once it has started?, Will deploying a new version of my app cause downtime?, Can I deploy multiple services at once?, Can I deploy a service that depends on another service?, Feature Comparisons, Is Defang a run-time platform?, What is the difference between Defang and platforms such as Vercel, fly.io, Railway, Render, or Heroku?, What is the difference between Defang and tools such as SST?, Troubleshooting, I'm having trouble running the binary on my Mac. What should I do?, I'm getting a warning/error. What does it mean?", "text": "- In the [Defang Playground](/docs/concepts/defang-playground), the app is deployed to AWS `us-west-2`. 
In the [Defang BYOC](/docs/concepts/defang-byoc) model, the region is determined by your Defang BYOC [Provider](/docs/category/providers) settings. - Yes! Defang makes it easy to deploy your application to your own cloud account. Please check out the [Defang BYOC](/docs/concepts/defang-byoc) documentation for more information. - The current release includes support for containers only, deployed to ECS. We are still exploring how to support additional execution models such as VMs and functions-as-a-service. However, using our Pulumi provider, it is possible to combine Defang services with other native AWS resources. - Yes! You can access AWS services in the AWS Dashboard as you normally would when you are [deploying to your AWS account](/docs/providers/aws) using Defang. In fact, you can access whatever other resources exist in the cloud account you are using for [Defang BYOC](/docs/concepts/defang-byoc). - While we currently support [AWS](/docs/providers/aws) for production, [GCP](/docs/providers/gcp) and [DigitalOcean](/docs/providers/digitalocean) are in preview with [Defang V1](/blog/2024-12-04-launch-week). We plan to support other clouds, such as [Azure](/docs/providers/azure), in future releases. - Yes! Defang makes it easy to deploy your app on production-ready infrastructure in your own cloud account. For example, you can deploy your app to AWS with `defang compose up --provider=aws --mode=production`. Check out your preferred cloud provider on [Defang BYOC](/docs/concepts/defang-byoc) and see our [Deployment Modes](/docs/concepts/deployment-modes) documentation for more information. - Defang does not currently support blue/green deployments, but it does support rolling updates with the `--mode=production` flag. See the [Deployment Modes](/docs/concepts/deployment-modes) documentation for more information. - Yes! Defang supports rolling updates with the `--mode=production` flag. 
See the [Deployment Modes](/docs/concepts/deployment-modes) documentation for more information. - No. Defang does not currently support auto-scaling. However, you can check out the [Scaling Your Services](/docs/tutorials/scaling-your-services) tutorial to see how you can scale your services manually with Defang. - No. Once a deployment has started, it cannot be canceled. However, you can always deploy a new version of your app which will replace the current deployment. - If you have deployed your application with the `--mode=production` flag, Defang will use the _production_ deployment mode. This mode will perform a rolling update to ensure zero downtime. If you use another deployment mode, you may experience downtime during the deployment, as Defang will not provision multiple replicas to save cost. See the [Deployment Modes](/docs/concepts/deployment-modes) documentation for more information. - Yes! You can deploy multiple services at once by defining them in a single compose.yaml file. When you run `defang compose up`, Defang will deploy all the services defined in the file at once. - Defang does not currently support service dependencies. All services will be deployed simultaneously. Defang will however run multiple healthchecks before marking a service as healthy and spinning down any previously deployed services when using the `production` deployment mode. See the [Deployment Modes](/docs/concepts/deployment-modes) documentation for more information. - No. Defang is not a run-time platform. Instead, it lets you host and run your application on a [cloud provider](/docs/category/providers) of your choice. You can think of it as a tool that makes it way easier to deploy to that cloud provider. We do provide [Defang Playground](/docs/concepts/defang-playground), but it is meant to be used as a testing environment only. 
- Defang is a tool that helps you get your application deployed to a [cloud provider](/docs/category/providers) of your choice, and it is not a platform. Unlike platforms, Defang does not host your application. We do provide [Defang Playground](/docs/concepts/defang-playground), but it is meant to be used as a testing environment only. - Defang is cloud-agnostic and language-agnostic, meaning that it is designed to work with different [cloud providers](/docs/category/providers), and programming languages. Since Defang is not tied to just one cloud or language, this allows for greater flexibility in a wide range of cases. Another difference is that Defang follows the [Compose specification](https://docs.docker.com/compose/compose-file/), allowing it to work smoothly with various container platforms such as Docker. - MacOS users will need to allow the binary to run due to security settings: 1. Attempt to run the binary. You'll see a security prompt preventing you from running it. 2. Go to System Preferences > Privacy & Security > General. 3. In the 'Allow applications downloaded from:' section, you should see a message about Defang being blocked. Click 'Open Anyway'. 4. Alternatively, select the option \"App Store and identified developers\" to allow all applications from the App Store and identified developers to run. - Please see the [Common Error Messages](/docs/faq/warnings-errors) page.", "path": "/docs/faq/questions" }, { "id": 11, "about": "Google Cloud Platform (GCP), Getting Started", - "text": ":::info The Defang GCP Provider is available for Public Preview as of December 2024. ::: :::success GCP Free Tier & Credits You can use the GCP Free Tier to try out Defang. Learn more about it [here](https://cloud.google.com/free). If you're an elligible startup, you can sign up for credits [here](https://cloud.google.com/developers/startups). ::: Defang enables you to effortlessly develop and deploy full, scalable applications with GCP. 
It is designed to simplify deploying your services to the cloud. As one of the leading cloud providers globally, GCP offers powerful tools and resources, and with Defang, you can bypass the complexities of the GCP platform. Let Defang handle the heavy lifting so you can focus on what matters most to you! After signing in to your GCP account, select an existing project or [create a new project](https://developers.google.com/workspace/guides/create-project), make sure [billing is enabled](https://cloud.google.com/billing/docs/how-to/modify-project), and note down the project ID and set it as environment variable `GCP_PROJECT_ID`. ```bash export GCP_PROJECT_ID= ``` Next step is to [authenticate your local environment with GCP](https://cloud.google.com/docs/authentication). Our preferred method is to set up [Application Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc) with the Google Cloud CLI. Once the [Google Cloud CLI is installed](https://cloud.google.com/sdk/docs/install), run the following command to authenticate: ```bash gcloud init gcloud auth application-default login ``` The Defang CLI will automatically check if `GCP_PROJECT_ID` environment variable is set and correctly authenticated with GCP before running. Once you are ready to go, add the `--provider=gcp` flag to your command to tell the Defang CLI to use the GCP provider, or set the `DEFANG_PROVIDER` environment variable to `gcp`. ```bash $ defang compose up --provider=gcp", + "text": ":::info The Defang GCP Provider is available for Public Preview as of December 2024. ::: :::tip[GCP Free Tier & Credits] You can use the GCP Free Tier to try out Defang. Learn more about it [here](https://cloud.google.com/free). If you're an eligible startup, you can sign up for credits [here](https://cloud.google.com/developers/startups). ::: Defang enables you to effortlessly develop and deploy full, scalable applications with GCP. 
It is designed to simplify deploying your services to the cloud. As one of the leading cloud providers globally, GCP offers powerful tools and resources, and with Defang, you can bypass the complexities of the GCP platform. Let Defang handle the heavy lifting so you can focus on what matters most to you! After signing in to your GCP account, select an existing project or [create a new project](https://developers.google.com/workspace/guides/create-project), make sure [billing is enabled](https://cloud.google.com/billing/docs/how-to/modify-project), and note down the project ID and set it as environment variable `GCP_PROJECT_ID`. ```bash export GCP_PROJECT_ID= ``` Next step is to [authenticate your local environment with GCP](https://cloud.google.com/docs/authentication). Our preferred method is to set up [Application Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc) with the Google Cloud CLI. Once the [Google Cloud CLI is installed](https://cloud.google.com/sdk/docs/install), run the following command to authenticate: ```bash gcloud init gcloud auth application-default login ``` The Defang CLI will automatically check if `GCP_PROJECT_ID` OR `CLOUDSDK_CORE_PROJECT` environment variable is set and correctly authenticated with GCP before running. Once you are ready to go, add the `--provider=gcp` flag to your command to tell the Defang CLI to use the GCP provider, or set the `DEFANG_PROVIDER` environment variable to `gcp`. ```bash $ defang compose up --provider=gcp", "path": "/docs/providers/gcp" }, { "id": 12, - "about": "or, Location, Architecture, Deployment, Runtime, Secrets, Future Improvements", - "text": "$ export DEFANG_PROVIDER=gcp ``` The Defang BYOC GCP Provider will use the location specified in the `GCP_LOCATION` environment variable. For a list of locations available in GCP, see the [location documentation](https://cloud.google.com/about/locations). 
If the `GCP_LOCATION` environment variable is not set, the default location `us-central1` (Iowa) will be used. Defang uses GCP cloud run to build, deploy, and run your services. The following describes the current state of Defang's support for GCP, the specific resources that Defang uses, and the roadmap for future support. To deploy your services, the Defang CLI sets up some basic resources needed, including enabling required APIs in the project, creating service accounts used to build and deploy your service with the required permissions, and creating a [Google Cloud Storage](https://cloud.google.com/storage) bucket where the Defang CLI uploads your source code to. The CLI then deploys a GCP Cloud Run Job that uses Pulumi to build your container image and run your services. The Provider builds and deploys your services using [Google Cloud Run](https://cloud.google.com/run) jobs, and runs your workloads using the [Google Cloud Run](https://cloud.google.com/run) service. The GCP provider does not currently support storing sensitive config values. The following features are in active development for GCP: - [Configuration and management of secrets](/docs/concepts//configuration.md) - [Networking and Load Balancing](/docs/concepts//networking.mdx) - [Custom Domains](/docs/concepts//domains.mdx) - [Managed Redis](/docs/concepts//managed-storage/managed-redis.md) - [Managed Postgres](/docs/concepts/managed-storage/managed-postgres.md) Stayed tuned for future updates!", + "about": "or, Location, Architecture, Deployment, Runtime, Secrets, Managed Storage, Managed Postgres, Managed Redis, Managed LLMs, Future Improvements", + "text": "$ export DEFANG_PROVIDER=gcp ``` The Defang BYOC GCP Provider will use the location specified in the `GCP_LOCATION` environment variable. For a list of locations available in GCP, see the [location documentation](https://cloud.google.com/about/locations). 
If the `GCP_LOCATION` environment variable is not set, the default location `us-central1` (Iowa) will be used. Defang uses GCP cloud run to build, deploy, and run your services. The following describes the current state of Defang's support for GCP, the specific resources that Defang uses, and the roadmap for future support. To deploy your services, the Defang CLI sets up some basic resources needed, including enabling required APIs in the project, creating service accounts used to build and deploy your service with the required permissions, and creating a [Google Cloud Storage](https://cloud.google.com/storage) bucket where the Defang CLI uploads your source code to. The CLI then deploys a GCP Cloud Run Job that uses Pulumi to build your container image and run your services. The Provider builds and deploys your services using [Google Cloud Run](https://cloud.google.com/run) jobs, and runs your workloads using the [Google Cloud Run](https://cloud.google.com/run) service. The GCP provider does not currently support storing sensitive config values. Defang can help you provision [managed storage](/docs/concepts/managed-storage/managed-storage.md) services. The following managed storage services are supported on GCP: When using [Managed Postgres](/docs/concepts/managed-storage/managed-postgres.mdx), the Defang CLI provisions a Cloud SQL instance in your account. When using [Managed Redis](/docs/concepts/managed-storage/managed-redis.md), the Defang CLI provisions a Memorystore for Redis cluster in your account. Defang offers integration with managed, cloud-native large language model services with the x-defang-llm service extension. Add this extension to any services which use the [Google Vertex AI SDKs](https://cloud.google.com/vertex-ai/docs/python-sdk/use-vertex-ai-sdk). 
The following features are in active development for GCP: - [Managed Object Storage](/docs/concepts//managed-storage/managed-object-storage.md) Stay tuned for future updates!", "path": "/docs/providers/gcp" }, { "id": 13, "about": "Overview, Managed services, Managed LLMs", "text": "Overall, the Defang Playground is very similar to deploying to your own cloud account. The Playground runs on a Defang-managed AWS account, so you can expect it to work similarly to deploying to [AWS](./aws/aws.md). In essence, the Playground does not support any [managed storage](../concepts/managed-storage) services, ie. `x-defang-postgres` and `x-defang-redis` are ignored when deploying to the Playground. You can however run both Postgres and Redis as regular container services for testing purposes. Defang offers integration with managed, cloud-native large language model services with the `x-defang-llm` service extension when deploying to your own cloud account with BYOC. 
This extension is supported in the Defang Playground with one caveat: your MODEL (model ID) will be limited to a default model chosen by Defang.", "path": "/docs/providers/playground" }, { @@ -86,619 +86,1003 @@ { "id": 15, "about": "DigitalOcean, Getting Started, Install Defang, Sign up for DigitalOcean, Authenticate with DigitalOcean, Authenticate with DigitalOcean Spaces, Configure your shell environment, Deploy your project to DigitalOcean", - "text": ":::info The Defang DigitalOcean Provider is available for Public Preview as of October 2024. ::: :::success DigitalOcean Credits You can get DigitalOcean credits to try out Defang. Learn more about it on their [pricing page](https://www.digitalocean.com/pricing). If you're an elligible startup, you can sign up for credits [here](https://www.digitalocean.com/hatch). ::: Why should you use Defang with DigitalOcean? Defang allows you to easily create and manage full, scalable applications with DigitalOcean. Defang aims to make it easier to deploy your services to the cloud. DigitalOcean is one of the most popular cloud providers in the world and with Defang, you can bypass the complexities of the DigitalOcean platform. Let Defang do it for you and spend more time working on what's important to you! To get started with the Defang BYOC DigitalOcean Provider, first [install the latest version of the Defang CLI](../getting-started#authenticate-with-defang). Next, make sure you have signed up for a [DigitalOcean account](https://try.digitalocean.com/freetrialoffer/). After signing up for your account, be sure to set up your [personal access token](https://docs.digitalocean.com/reference/api/create-personal-access-token/). Defang will need to find this value in your shell as the `DIGITALOCEAN_TOKEN` environment variable. You will also need a [DigitalOcean Spaces access key](https://docs.digitalocean.com/products/spaces/how-to/manage-access/). 
Defang will need to find this value in your shell as the `SPACES_ACCESS_KEY_ID`, and `SPACES_SECRET_ACCESS_KEY` environment variables. ```bash export DIGITALOCEAN_TOKEN= export SPACES_ACCESS_KEY_ID= export SPACES_SECRET_ACCESS_KEY= ``` The Defang CLI will automatically check if these envinonment variables are set before running. Once you are ready to go, add the `--provider=digitalocean` to your command to tell the Defang CLI to use the DigitalOcean provider or set the `DEFANG_PROVIDER` environment variable to `digitalocean`. ```bash $ defang compose up --provider=digitalocean", + "text": ":::info The Defang DigitalOcean Provider is available for Public Preview as of October 2024. ::: :::tip[DigitalOcean Credits] You can get DigitalOcean credits to try out Defang. Learn more about it on their [pricing page](https://www.digitalocean.com/pricing). If you're an eligible startup, you can sign up for credits [here](https://www.digitalocean.com/hatch). ::: Why should you use Defang with DigitalOcean? Defang allows you to easily create and manage full, scalable applications with DigitalOcean. Defang aims to make it easier to deploy your services to the cloud. DigitalOcean is one of the most popular cloud providers in the world and with Defang, you can bypass the complexities of the DigitalOcean platform. Let Defang do it for you and spend more time working on what's important to you! To get started with the Defang BYOC DigitalOcean Provider, first [install the latest version of the Defang CLI](../getting-started#authenticate-with-defang). Next, make sure you have signed up for a [DigitalOcean account](https://try.digitalocean.com/freetrialoffer/). After signing up for your account, be sure to set up your [personal access token](https://docs.digitalocean.com/reference/api/create-personal-access-token/). Defang will need to find this value in your shell as the `DIGITALOCEAN_TOKEN` environment variable. 
You will also need a [DigitalOcean Spaces access key](https://docs.digitalocean.com/products/spaces/how-to/manage-access/). Defang will need to find this value in your shell as the `SPACES_ACCESS_KEY_ID`, and `SPACES_SECRET_ACCESS_KEY` environment variables. ```bash export DIGITALOCEAN_TOKEN= export SPACES_ACCESS_KEY_ID= export SPACES_SECRET_ACCESS_KEY= ``` The Defang CLI will automatically check if these envinonment variables are set before running. Once you are ready to go, add the `--provider=digitalocean` to your command to tell the Defang CLI to use the DigitalOcean provider or set the `DEFANG_PROVIDER` environment variable to `digitalocean`. ```bash $ defang compose up --provider=digitalocean", "path": "/docs/providers/digitalocean/digitalocean" }, { "id": 16, "about": "or, Region, Architecture, Deployment, Runtime, Secrets, Future Improvements", - "text": "$ export DEFANG_PROVIDER=digitalocean ``` The Defang BYOC DigitalOcean Provider will use the region specified in the `REGION` environment variable. For a list of regions available in DigitalOcean, see the [region documentation](https://docs.digitalocean.com/platform/regional-availability/#app-platform-availability). Defang uses resources that are native to the cloud provider you are using. The following describes the current state of Defang's support for DigitalOcean, the specific resources that Defang uses, and the roadmap for future support. To deploy your services, the Defang CLI packages your code and uploads it to a [Spaces Object Storage](https://www.digitalocean.com/products/spaces) bucket in your account. The CLI then deploys an App Platform App that uses Pulumi to build your container image and run your service. The Provider runs your workloads using the [DigitalOcean App Platform](https://docs.digitalocean.com/products/app-platform/). 
Defang allows you to configure your services with [sensitive config values](https://docs.digitalocean.com/products/app-platform/how-to/use-environment-variables/) in DigitalOcean. Sensitive values are added and stored with encryption in your app once it has been deployed. The following features are still in development for DigitalOcean: - [Custom Domains](/docs/concepts//domains.mdx) - [Managed Redis](/docs/concepts//managed-storage/managed-redis.md) - [Managed Postgres](/docs/concepts/managed-storage/managed-postgres.md) Stay tuned for future updates!", + "text": "$ export DEFANG_PROVIDER=digitalocean ``` The Defang BYOC DigitalOcean Provider will use the region specified in the `REGION` environment variable. For a list of regions available in DigitalOcean, see the [region documentation](https://docs.digitalocean.com/platform/regional-availability/#app-platform-availability). Defang uses resources that are native to the cloud provider you are using. The following describes the current state of Defang's support for DigitalOcean, the specific resources that Defang uses, and the roadmap for future support. To deploy your services, the Defang CLI packages your code and uploads it to a [Spaces Object Storage](https://www.digitalocean.com/products/spaces) bucket in your account. The CLI then deploys an App Platform App that uses Pulumi to build your container image and run your service. The Provider runs your workloads using the [DigitalOcean App Platform](https://docs.digitalocean.com/products/app-platform/). Defang allows you to configure your services with [sensitive config values](https://docs.digitalocean.com/products/app-platform/how-to/use-environment-variables/) in DigitalOcean. Sensitive values are added and stored with encryption in your app once it has been deployed. 
The following features are still in development for DigitalOcean: - [Custom Domains](/docs/concepts//domains.mdx) - [Managed Redis](/docs/concepts//managed-storage/managed-redis.md) - [Managed Postgres](/docs/concepts/managed-storage/managed-postgres.mdx) - [Managed Language Models](/docs/concepts/managed-llms/managed-language-models.md) Stay tuned for future updates!", "path": "/docs/providers/digitalocean/digitalocean" }, { "id": 17, "about": "Amazon Web Services (AWS), Getting Started", - "text": "Why should you use Defang with AWS? Defang allows you to easily create and manage full, scalable applications with AWS. Defang aims to make it easier to deploy your services to the cloud. Don't waste your time learning the ins and outs of AWS, deciding which of the 200+ services to use, and then writing the infrastructure code to deploy your services, and making sure they are properly secured. Defang does all of that for you. :::success AWS Free Tier & Credits You can use the AWS Free Tier to try out Defang. Learn more about it [here](https://aws.amazon.com/free/?all-free-tier.sort-by=item.additionalFields.SortRank&all-free-tier.sort-order=asc&awsf.Free%20Tier%20Types=*all&awsf.Free%20Tier%20Categories=*all). If you're an elligible startup, you can sign up for credits [here](https://aws.amazon.com/startups/sign-up?referrer_url_path=%2Fstartups). ::: Getting started with the Defang BYOC AWS Provider is easy. The first step is to [authenticate your shell](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) with AWS as an admin user. The authenticated user should be an IAM admin because Defang will need permission to create resources and IAM roles in your account. :::tip If you have the AWS CLI installed, you should be able to successfully run `aws sts get-caller-identity` and see your account ID. ::: Use the `--provider=aws` flag to tell the Defang CLI to use the AWS Provider or set the `DEFANG_PROVIDER` environment variable to `aws`. 
```bash $ defang compose up --provider=aws", + "text": "Why should you use Defang with AWS? Defang allows you to easily create and manage full, scalable applications with AWS. Defang aims to make it easier to deploy your services to the cloud. Don't waste your time learning the ins and outs of AWS, deciding which of the 200+ services to use, and then writing the infrastructure code to deploy your services, and making sure they are properly secured. Defang does all of that for you. :::tip[AWS Free Tier & Credits] You can use the AWS Free Tier to try out Defang. Learn more about it [here](https://aws.amazon.com/free/?all-free-tier.sort-by=item.additionalFields.SortRank&all-free-tier.sort-order=asc&awsf.Free%20Tier%20Types=*all&awsf.Free%20Tier%20Categories=*all). If you're an eligible startup, you can sign up for credits [here](https://aws.amazon.com/startups/sign-up?referrer_url_path=%2Fstartups). ::: Getting started with the Defang BYOC AWS Provider is easy. The first step is to [authenticate your shell](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) with AWS as an admin user. The authenticated user should be an IAM admin because Defang will need permission to create resources and IAM roles in your account. :::tip If you have the AWS CLI installed, you should be able to successfully run `aws sts get-caller-identity` and see your account ID. ::: Use the `--provider=aws` flag to tell the Defang CLI to use the AWS Provider or set the `DEFANG_PROVIDER` environment variable to `aws`. 
```bash $ defang compose up --provider=aws", "path": "/docs/providers/aws/aws" }, { "id": 18, - "about": "or, Region, Architecture, Secrets, Deployment, Runtime, Service Discovery, Managed Storage, Managed Postgres, Managed Redis, Managed Resources", - "text": "$ export DEFANG_PROVIDER=aws ``` :::warning Because Defang creates roles, you need to have the appropriate permissions to create roles in your cloud provider account, typically the `AdministratorAccess` policy in AWS. ::: :::tip The Defang CLI does not depend on the AWS CLI. It uses the [AWS SDK for Go](https://aws.amazon.com/sdk-for-go/) to interact with your AWS account. In most cases, if you can run the `aws sts get-caller-identity` from the tip above, you should be good to go. However, due to a difference between the AWS CLI and the AWS SDK for Go, there is at least one case where they behave differently: if you are using `aws sso login` and have clashing profiles in your `.aws/config` and `.aws/credentials` files, the AWS CLI will prioritize SSO profiles and caches over regular profiles, but the AWS SDK for Go will prioritize the credentials file, and it may fail. ::: The Defang BYOC AWS Provider will use the region specified in the `AWS_REGION` environment variable, or a profile in the `~/.aws/config` file exactly as the AWS CLI would. Defang uses resources that are native to the cloud provider you are using. The following describes the current state of Defang's support for AWS, the specific resources that Defang uses, and the roadmap for future support. Defang allows you to configure your services with sensitive config values. Sensitive values are stored in AWS Systems Manager Parameter Store, and are encrypted. To deploy your services, the Defang CLI packages your code and uploads it to an S3 bucket in your account. The CLI then deploys an ECS task that uses Pulumi to build your container image and run your service. The provider runs your workloads using ECS using Fargate. 
It provisions a VPC with public and private subnets, and deploys your services to the private subnets. It then provisions an Application Load Balancer (ALB) and routes traffic to your services. Defang uses a Route53 private hosted zone for service discovery. Each (private) service in the Compose file will get a CNAME or A record which resolves to the service's AWS domain name or IP, respectively. To update the A records for the dynamically assigned IP addresses, Defang will add a [Route53 sidecar](https://github.com/DefangLabs/route53-sidecar) alongside your container. Defang can help you provision [managed storage](/docs/concepts/managed-storage/managed-storage.md) services. The following managed storage services are supported on AWS: When using [Managed Postgres](/docs/concepts/managed-storage/managed-postgres.md), the Defang CLI provisions an RDS Postgres instance in your account. When using [Managed Redis](/docs/concepts/managed-storage/managed-redis.md), the Defang CLI provisions an ElastiCache Redis cluster in your account. 
Defang will create and manage the following resources in your AWS account from its bootstrap CloudFormation template: | Resource Type | Example Resource Name | |---------------|------------------------| | s3/Bucket | defang-cd-bucket-cbpbzz8hzm7 | | ecs/ClusterCapacityProviderAssociations | defang-cd-Cluster-pqFhjwuklvm | | ecs/Cluster | defang-cd-ClusterpJqFhjwuklvm | | iam/Role | defang-cd-ExeutionRole-XE7RbQDfeEwx | | ec2/InternetGateway | igw-05bd7adc92541ec3 | | ec2/VPCGatewayAttachment | IGW|vpc-0cbca64f13435695 | | logs/LogGroup | defang-cd-Logroup-6LSZet3tFnEy | | ecr/PullThroughCacheRule | defang-cd-ecrpublic | | ec2/Route | rtb-08f3f5afc9e6c8c8|0.0.0.0/0 | | ec2/RouteTable | rtb-08f3f5ffc9e6c8c8 | | ec2/VPCEndpoint | vpce-02175d8d4f47d0c9 | | ec2/SecurityGroup | sg-032b839c63e70e49 | | ec2/Subnet | subnet-086bead399ddc8a0 | | ec2/SubnetRouteTableAssociation | rtbassoc-02e200d45e7227fe | | ecs/TaskDefinition | arn:aws:ecsus-west-2:381492210770:task-definition/defang-cd-TaskDefinition-RXd5tf9TaN38:1 | | iam/Role | defang-cd-askRole-gsEeDPd6sPQY | | ec2/VPC | vpc-0cbca64f13435695 | Then, for each project you deploy, Defang will create and manage the following resources: | Resource Type | Example Resource Name | |---------------|------------------------| | ecr/Repository | project1/kaniko-build | | ecr/LifecyclePolicy | project1/kaniko-build | | acm/Certificate | *.project1.tenant1.defang.app | | ecr/Repository | project1/kaniko-build/cache | | ecr/LifecyclePolicy | project1/kaniko-build/cache | | iam/InstanceProfile | ecs-agent-profile | | iam/Role | ecs-task-execution-role | | cloudwatch/EventRule | project1-ecs-lifecycle-rule | | cloudwatch/EventTarget | project1-ecs-event-cw-target | | route53/Record | validation-project1.tenant1.defang.app | | acm/CertificateValidation | *.project1.tenant1.defang.appValidation | | ec2/VpcDhcpOptionsAssociation | dhcp-options-association | | cloudwatch/LogGroup | builds | | iam/Role | kaniko-task-role | | 
ecs/TaskDefinition | kanikoTaskDefArm64 | | ecs/TaskDefinition | kanikoTaskDefAmd64 | | s3/Bucket | defang-build | | s3/BucketPublicAccessBlock | defang-build-block | | ecs/Cluster | cluster | | ecs/ClusterCapacityProviders | cluster-capacity-providers | | ec2/SecurityGroup | project1_app-sg | | ec2/SecurityGroup | bootstrap | | ec2/VpcDhcpOptions | dhcp-options | | cloudwatch/LogGroup | logs |", + "about": "or, Region, Architecture, Secrets, Deployment, Runtime, Service Discovery, Managed Storage, Managed Postgres, Managed Redis, Managed LLMs, Managed MongoDB, Managed Resources", + "text": "$ export DEFANG_PROVIDER=aws ``` :::warning Because Defang creates roles, you need to have the appropriate permissions to create roles in your cloud provider account, typically the `AdministratorAccess` policy in AWS. ::: :::tip The Defang CLI does not depend on the AWS CLI. It uses the [AWS SDK for Go](https://aws.amazon.com/sdk-for-go/) to interact with your AWS account. In most cases, if you can run the `aws sts get-caller-identity` from the tip above, you should be good to go. However, due to a difference between the AWS CLI and the AWS SDK for Go, there is at least one case where they behave differently: if you are using `aws sso login` and have clashing profiles in your `.aws/config` and `.aws/credentials` files, the AWS CLI will prioritize SSO profiles and caches over regular profiles, but the AWS SDK for Go will prioritize the credentials file, and it may fail. ::: The Defang BYOC AWS Provider will use the region specified in the `AWS_REGION` environment variable, or a profile in the `~/.aws/config` file exactly as the AWS CLI would. Defang uses resources that are native to the cloud provider you are using. The following describes the current state of Defang's support for AWS, the specific resources that Defang uses, and the roadmap for future support. Defang allows you to configure your services with sensitive config values. 
Sensitive values are stored in AWS Systems Manager Parameter Store, and are encrypted. To deploy your services, the Defang CLI packages your code and uploads it to an S3 bucket in your account. The CLI then deploys an ECS task that uses Pulumi to build your container image and run your service. The provider runs your workloads using ECS using Fargate. It provisions a VPC with public and private subnets, and deploys your services to the private subnets. It then provisions an Application Load Balancer (ALB) and routes traffic to your services. Defang uses a Route53 private hosted zone for service discovery. Each (private) service in the Compose file will get a CNAME or A record which resolves to the service's AWS domain name or IP, respectively. To update the A records for the dynamically assigned IP addresses, Defang will add a [Route53 sidecar](https://github.com/DefangLabs/route53-sidecar) alongside your container. Defang can help you provision [managed storage](/docs/concepts/managed-storage/managed-storage.md) services. The following managed storage services are supported on AWS: When using [Managed Postgres](/docs/concepts/managed-storage/managed-postgres.mdx), the Defang CLI provisions an RDS Postgres instance in your account. When using [Managed Redis](/docs/concepts/managed-storage/managed-redis.md), the Defang CLI provisions an ElastiCache Redis cluster in your account. Defang offers integration with managed, cloud-native large language model services with the `x-defang-llm` service extension. Add this extension to any services which use the Bedrock SDKs. When using [Managed LLMs](/docs/concepts/managed-llms/managed-language-models.md), the Defang CLI provisions an ElastiCache Redis cluster in your account. Defang will provision a DocumentDB instance for services that use the `x-defang-mongodb` service extension. This allows you to use MongoDB as a managed service, rather than running it as a container. 
Defang will create and manage the following resources in your AWS account from its bootstrap CloudFormation template: | Resource Type | Example Resource Name | |---------------|------------------------| | s3/Bucket | defang-cd-bucket-cbpbzz8hzm7 | | ecs/ClusterCapacityProviderAssociations | defang-cd-Cluster-pqFhjwuklvm | | ecs/Cluster | defang-cd-ClusterpJqFhjwuklvm | | iam/Role | defang-cd-ExeutionRole-XE7RbQDfeEwx | | ec2/InternetGateway | igw-05bd7adc92541ec3 | | ec2/VPCGatewayAttachment | IGW|vpc-0cbca64f13435695 | | logs/LogGroup | defang-cd-Logroup-6LSZet3tFnEy | | ecr/PullThroughCacheRule | defang-cd-ecrpublic | | ec2/Route | rtb-08f3f5afc9e6c8c8|0.0.0.0/0 | | ec2/RouteTable | rtb-08f3f5ffc9e6c8c8 | | ec2/VPCEndpoint | vpce-02175d8d4f47d0c9 | | ec2/SecurityGroup | sg-032b839c63e70e49 | | ec2/Subnet | subnet-086bead399ddc8a0 | | ec2/SubnetRouteTableAssociation | rtbassoc-02e200d45e7227fe | | ecs/TaskDefinition | arn:aws:ecsus-west-2:381492210770:task-definition/defang-cd-TaskDefinition-RXd5tf9TaN38:1 | | iam/Role | defang-cd-askRole-gsEeDPd6sPQY | | ec2/VPC | vpc-0cbca64f13435695 | Then, for each project you deploy, Defang will create and manage the following resources: | Resource Type | Example Resource Name | |---------------|------------------------| | ecr/Repository | project1/kaniko-build | | ecr/LifecyclePolicy | project1/kaniko-build | | acm/Certificate | *.project1.tenant1.defang.app | | ecr/Repository | project1/kaniko-build/cache | | ecr/LifecyclePolicy | project1/kaniko-build/cache | | iam/InstanceProfile | ecs-agent-profile | | iam/Role | ecs-task-execution-role | | cloudwatch/EventRule | project1-ecs-lifecycle-rule | | cloudwatch/EventTarget | project1-ecs-event-cw-target | | route53/Record | validation-project1.tenant1.defang.app | | acm/CertificateValidation | *.project1.tenant1.defang.appValidation | | ec2/VpcDhcpOptionsAssociation | dhcp-options-association | | cloudwatch/LogGroup | builds | | iam/Role | kaniko-task-role | | 
ecs/TaskDefinition | kanikoTaskDefArm64 | | ecs/TaskDefinition | kanikoTaskDefAmd64 | | s3/Bucket | defang-build | | s3/BucketPublicAccessBlock | defang-build-block | | ecs/Cluster | cluster | | ecs/ClusterCapacityProviders | cluster-capacity-providers | | ec2/SecurityGroup | project1_app-sg | | ec2/SecurityGroup | bootstrap | | ec2/VpcDhcpOptions | dhcp-options | | cloudwatch/LogGroup | logs |", "path": "/docs/providers/aws/aws" }, { "id": 19, + "about": "Upgrade the Defang CLI to the latest version", + "text": "```\ndefang upgrade [flags]\n```\n\nAliases: `update`\n### Options\n\n```\n -h, --help help for upgrade\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_upgrade" + }, + { + "id": 20, + "about": "Estimate the cost of deploying the current project", + "text": "```\ndefang estimate [flags]\n```\n\n### Options\n\n```\n -h, --help help for estimate\n -m, --mode mode deployment mode; one of [affordable balanced high_availability]\n -r, --region string which cloud region to estimate (default \"us-west-2\")\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never 
auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_estimate" + }, + { + "id": 21, + "about": "Manage personal access tokens", + "text": "```\ndefang token [flags]\n```\n\n### Options\n\n```\n --expires duration validity duration of the token (default 24h0m0s)\n -h, --help help for token\n --scope string scope of the token; one of [admin delete read tail] (required)\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 
3-Jul-2025", + "path": "/docs/cli/defang_token" + }, + { + "id": 22, + "about": "Reads a Compose file and deploy a new project or update an existing project", + "text": "```\ndefang compose up [flags]\n```\n\nAliases: `deploy`\n### Options\n\n```\n -d, --detach run in detached mode\n --force force a build of the image even if nothing has changed\n -h, --help help for up\n -m, --mode mode deployment mode; one of [affordable balanced high_availability]\n --utc show logs in UTC timezone (ie. TZ=UTC)\n --wait-timeout int maximum duration to wait for the project to be running|healthy (default -1)\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n --pulumi-backend string specify an alternate Pulumi backend URL or \"pulumi-cloud\"\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang compose](defang_compose.md)\t - Work with local Compose files\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_compose_up" + }, + { + "id": 23, + "about": "Reads a Compose file and shows the generated config", + "text": "```\ndefang compose config [flags]\n```\n\n### Options\n\n```\n -h, --help help for config\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't 
actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n --pulumi-backend string specify an alternate Pulumi backend URL or \"pulumi-cloud\"\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang compose](defang_compose.md)\t - Work with local Compose files\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_compose_config" + }, + { + "id": 24, + "about": "Read and/or agree the Defang terms of service", + "text": "```\ndefang terms [flags]\n```\n\nAliases: `tos`, `eula`, `tac`, `tou`\n### Options\n\n```\n --agree-tos agree to the Defang terms of service\n -h, --help help for terms\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_terms" + }, + { + "id": 25, + "about": "Generate a TLS certificate", + "text": "```\ndefang cert generate [flags]\n```\n\nAliases: `gen`\n### Options\n\n```\n -h, 
--help help for generate\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang cert](defang_cert.md)\t - Manage certificates\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_cert_generate" + }, + { + "id": 26, + "about": "Show logs from one or more services", + "text": "```\ndefang compose logs [SERVICE...] [flags]\n```\n\nAliases: `tail`\n### Options\n\n```\n --deployment string deployment ID of the service\n --filter string only show logs containing given text; case-insensitive\n -h, --help help for logs\n -r, --raw show raw (unparsed) logs\n --since string show logs since duration/time\n --type log-type show logs of type; one of [RUN BUILD ALL] (default RUN)\n --until string show logs until duration/time\n --utc show logs in UTC timezone (ie. 
TZ=UTC)\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n --pulumi-backend string specify an alternate Pulumi backend URL or \"pulumi-cloud\"\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang compose](defang_compose.md)\t - Work with local Compose files\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_compose_logs" + }, + { + "id": 27, + "about": "Start defang MCP server", + "text": "```\ndefang mcp serve [flags]\n```\n\n### Options\n\n```\n --auth-server int auth server port\n -h, --help help for serve\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang mcp](defang_mcp.md)\t - Manage MCP Server for defang\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_mcp_serve" + }, + { + "id": 28, + "about": 
"Work with local Compose files", + "text": "### Synopsis\n\nDefine and deploy multi-container applications with Defang. Most compose commands require\na \"compose.yaml\" file. The simplest \"compose.yaml\" file with a single service is:\n\nservices:\n app: # the name of the service\n build: . # the folder with the Dockerfile and app sources (. means current folder)\n ports:\n - 80 # the port the service listens on for HTTP requests\n\n\nAliases: `stack`\n### Options\n\n```\n -h, --help help for compose\n --pulumi-backend string specify an alternate Pulumi backend URL or \"pulumi-cloud\"\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n* [defang compose config](defang_compose_config.md)\t - Reads a Compose file and shows the generated config\n* [defang compose down](defang_compose_down.md)\t - Reads a Compose file and deprovisions its services\n* [defang compose logs](defang_compose_logs.md)\t - Show logs from one or more services\n* [defang compose ps](defang_compose_ps.md)\t - Get list of services in the project\n* [defang compose up](defang_compose_up.md)\t - Reads a Compose file and deploy a new project or update an existing project\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + 
"path": "/docs/cli/defang_compose" + }, + { + "id": 29, + "about": "Add, update, or delete service config", + "text": "Aliases: `secrets`, `secret`\n### Options\n\n```\n -h, --help help for config\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n* [defang config create](defang_config_create.md)\t - Adds or updates a sensitive config value\n* [defang config ls](defang_config_ls.md)\t - List configs\n* [defang config rm](defang_config_rm.md)\t - Removes one or more config values\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_config" + }, + { + "id": 30, + "about": "Adds or updates a sensitive config value", + "text": "```\ndefang config create CONFIG [file|-] [flags]\n```\n\nAliases: `set`, `add`, `put`\n### Options\n\n```\n -e, --env set the config from an environment variable\n -h, --help help for create\n --random set a secure randomly generated value for config\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change 
anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang config](defang_config.md)\t - Add, update, or delete service config\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_config_create" + }, + { + "id": 31, + "about": "Get version information for the CLI and Fabric service", + "text": "```\ndefang version [flags]\n```\n\nAliases: `ver`, `stat`, `status`\n### Options\n\n```\n -h, --help help for version\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_version" + }, + { + "id": 32, + "about": "List active deployments across all projects", + "text": "```\ndefang deployments [flags]\n```\n\nAliases: `deployment`, `deploys`, `deps`, `dep`\n### Options\n\n```\n -h, --help help for deployments\n --utc show logs in UTC timezone (ie. 
TZ=UTC)\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n* [defang deployments list](defang_deployments_list.md)\t - List deployment history for a project\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_deployments" + }, + { + "id": 33, + "about": "Show logs from one or more services", + "text": "```\ndefang tail [SERVICE...] [flags]\n```\n\nAliases: `logs`\n### Options\n\n```\n --deployment string deployment ID of the service\n --filter string only show logs containing given text; case-insensitive\n -h, --help help for tail\n -r, --raw show raw (unparsed) logs\n --since string show logs since duration/time\n --type log-type show logs of type; one of [RUN BUILD ALL] (default RUN)\n --until string show logs until duration/time\n --utc show logs in UTC timezone (ie. 
TZ=UTC)\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_tail" + }, + { + "id": 34, + "about": "List deployment history for a project", + "text": "```\ndefang deployments list [flags]\n```\n\nAliases: `ls`\n### Options\n\n```\n -h, --help help for list\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n --utc show logs in UTC timezone (ie. 
TZ=UTC)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang deployments](defang_deployments.md)\t - List active deployments across all projects\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_deployments_list" + }, + { + "id": 35, + "about": "Log out", + "text": "```\ndefang logout [flags]\n```\n\nAliases: `logoff`, `revoke`\n### Options\n\n```\n -h, --help help for logout\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_logout" + }, + { + "id": 36, + "about": "Create a new Defang project from a sample", + "text": "```\ndefang new [SAMPLE] [flags]\n```\n\nAliases: `init`\n### Options\n\n```\n -h, --help help for new\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization 
name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_new" + }, + { + "id": 37, + "about": "Generate a sample Defang project", + "text": "```\ndefang generate [flags]\n```\n\nAliases: `gen`\n### Options\n\n```\n -h, --help help for generate\n --model string LLM model to use for generating the code (Pro users only)\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_generate" + }, + { + "id": 38, + "about": "Manage MCP Server for defang", + "text": "### Options\n\n```\n -h, --help help for mcp\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for 
troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n* [defang mcp serve](defang_mcp_serve.md)\t - Start defang MCP server\n* [defang mcp setup](defang_mcp_setup.md)\t - Setup MCP client for defang mcp server\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_mcp" + }, + { + "id": 39, + "about": "Get list of services in the project", + "text": "```\ndefang services [flags]\n```\n\nAliases: `getServices`, `ps`, `ls`, `list`\n### Options\n\n```\n -h, --help help for services\n -l, --long show more details\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": 
"/docs/cli/defang_services" + }, + { + "id": 40, + "about": "Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.", + "text": "### Options\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -h, --help help for defang\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang cert](defang_cert.md)\t - Manage certificates\n* [defang compose](defang_compose.md)\t - Work with local Compose files\n* [defang config](defang_config.md)\t - Add, update, or delete service config\n* [defang deployments](defang_deployments.md)\t - List active deployments across all projects\n* [defang estimate](defang_estimate.md)\t - Estimate the cost of deploying the current project\n* [defang generate](defang_generate.md)\t - Generate a sample Defang project\n* [defang login](defang_login.md)\t - Authenticate to Defang\n* [defang logout](defang_logout.md)\t - Log out\n* [defang mcp](defang_mcp.md)\t - Manage MCP Server for defang\n* [defang new](defang_new.md)\t - Create a new Defang project from a sample\n* [defang services](defang_services.md)\t - Get list of services in the project\n* [defang tail](defang_tail.md)\t - Show logs from one or more services\n* [defang terms](defang_terms.md)\t - Read and/or agree the Defang terms of service\n* [defang token](defang_token.md)\t - Manage personal access tokens\n* [defang upgrade](defang_upgrade.md)\t - Upgrade the Defang CLI to the latest version\n* 
[defang version](defang_version.md)\t - Get version information for the CLI and Fabric service\n* [defang whoami](defang_whoami.md)\t - Show the current user\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang" + }, + { + "id": 41, + "about": "Get list of services in the project", + "text": "```\ndefang compose ps [flags]\n```\n\nAliases: `getServices`, `services`\n### Options\n\n```\n -h, --help help for ps\n -l, --long show more details\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n --pulumi-backend string specify an alternate Pulumi backend URL or \"pulumi-cloud\"\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang compose](defang_compose.md)\t - Work with local Compose files\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_compose_ps" + }, + { + "id": 42, + "about": "Setup MCP client for defang mcp server", + "text": "```\ndefang mcp setup [flags]\n```\n\n### Options\n\n```\n --client string MCP setup client (supports: claude, windsurf, cursor, vscode)\n -h, --help help for setup\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file 
path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang mcp](defang_mcp.md)\t - Manage MCP Server for defang\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_mcp_setup" + }, + { + "id": 43, + "about": "Reads a Compose file and deprovisions its services", + "text": "```\ndefang compose down [SERVICE...] [flags]\n```\n\nAliases: `rm`, `remove`\n### Options\n\n```\n -d, --detach run in detached mode\n -h, --help help for down\n --utc show logs in UTC timezone (ie. TZ=UTC)\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n --pulumi-backend string specify an alternate Pulumi backend URL or \"pulumi-cloud\"\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang compose](defang_compose.md)\t - Work with local Compose files\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_compose_down" + }, + { + "id": 44, + "about": "Authenticate to Defang", + "text": "```\ndefang login [flags]\n```\n\n### Options\n\n```\n -h, --help help for login\n --training-opt-out Opt out of ML training (Pro users only)\n```\n\n### Options inherited from parent commands\n\n```\n 
--color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_login" + }, + { + "id": 45, + "about": "Show the current user", + "text": "```\ndefang whoami [flags]\n```\n\n### Options\n\n```\n -h, --help help for whoami\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_whoami" + }, + { + "id": 46, + "about": "List configs", + "text": 
"```\ndefang config ls [flags]\n```\n\nAliases: `list`\n### Options\n\n```\n -h, --help help for ls\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang config](defang_config.md)\t - Add, update, or delete service config\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_config_ls" + }, + { + "id": 47, + "about": "Manage certificates", + "text": "### Options\n\n```\n -h, --help help for cert\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang](defang.md)\t - Defang CLI is used to take your app from Docker Compose to a secure and scalable deployment on your favorite cloud in minutes.\n* [defang cert generate](defang_cert_generate.md)\t - Generate a TLS certificate\n\n###### Auto generated 
by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_cert" + }, + { + "id": 48, + "about": "Removes one or more config values", + "text": "```\ndefang config rm CONFIG... [flags]\n```\n\nAliases: `del`, `delete`, `remove`\n### Options\n\n```\n -h, --help help for rm\n```\n\n### Options inherited from parent commands\n\n```\n --color color-mode colorize output; one of [never auto always] (default auto)\n -C, --cwd string change directory before running the command\n --debug debug logging for troubleshooting the CLI\n --dry-run dry run (don't actually change anything)\n -f, --file stringArray compose file path(s)\n -T, --non-interactive disable interactive prompts / no TTY\n --org string override GitHub organization name (tenant)\n -p, --project-name string project name\n -P, --provider provider bring-your-own-cloud provider; one of [defang aws digitalocean gcp] (default auto)\n -v, --verbose verbose logging\n```\n\n### SEE ALSO\n\n* [defang config](defang_config.md)\t - Add, update, or delete service config\n\n###### Auto generated by spf13/cobra on 3-Jul-2025", + "path": "/docs/cli/defang_config_rm" + }, + { + "id": 49, "about": "Authentication", "text": "To do pretty much anything with Defang, you'll need to authenticate with the system. You can do this by running the following command in the [CLI](/docs/getting-started): ```bash defang login ``` This will prompt you to open a browser and log in to your [Defang account](/docs/concepts/accounts). For now, the only way to log in is with GitHub, though we will offer other providers to authenticate in the future. Once you've logged in, you can close the browser and return to the terminal. You should see a message that you've successfully logged in. :::tip Keep in mind that your Defang account is separate from your [cloud provider account](/docs/concepts/defang-byoc). You will need to authenticate with your cloud provider account separately to deploy services to your own cloud account. 
:::", "path": "/docs/concepts/authentication" }, { - "id": 20, + "id": 50, "about": "Debug, How It Works", "text": "Defang includes an AI-driven tool in the CLI (command-line interface) to help you debug your cloud applications. The AI agent will use your service logs as well as the files in your project to help you identify and resolve issues. :::info Defang has another AI-driven tool called [`generate`](/docs/concepts/generate). ::: Here is a typical workflow in the [Defang CLI](/docs/getting-started) that will automatically run the AI debugger tool: 1. When you deploy a project with Defang (i.e. `defang compose up`), the CLI will wait for all services' statuses to switch to healthy. 2. If any service fails to deploy, the AI debugger will kick in and ask for permission. 3. The AI agent will analyze the logs and files in your project to identify the issue(s). 4. Then, it will provide you with the suggested fix(es) in the terminal. :::tip The AI debugger will not change your files. Instead, it will show you a suggestion, and it is up to you if you want to use it in your code. ::: The AI debugger only kicks in when any service in a project fails to deploy. This could be because of a build failure, healthchecks failing, or a variety of other issues. :::info The AI debugger only kicks in when any service in a project fails to deploy. At the moment, we do not offer any way to trigger the AI debugger manually. :::", "path": "/docs/concepts/debug" }, { - "id": 21, + "id": 51, "about": "Run-time Resources, Examples, Docker Compose, Pulumi", - "text": "You can configure the resources available to your Defang services as required. You can configure the CPU, and memory allocated to your services as well as the number of replicas and whether or not your services requires access to GPUs. 
```yaml services: gpu-service: deploy: replicas: 3 resources: reservations: cpus: '1.0' memory: 2048M devices: - capabilities: [\"gpu\"] ``` ```typescript const service = new defang.DefangService(\"gpu-service\", { deploy: { replicas: 3, resources: { reservations: { cpu: 1.0, memory: 2048, devices: [{capabilities: ['gpu']}] } } } }); ``` :::info GPUs If you require access to GPUs, you can specify this in the `deploy.resources.reservations.devices[0].capabilities` section of your service as in the examples above. You can learn more about this in the [Docker-Compose documentation](https://docs.docker.com/compose/gpu-support/). This is the only supported value in the `deploy.resources.reservations.devices` section. :::", + "text": "You can configure the resources available to your Defang services as required. You can configure the CPU, and memory allocated to your services as well as the number of replicas and whether or not your services requires access to GPUs. ```yaml services: gpu-service: deploy: replicas: 3 resources: reservations: cpus: \"1.0\" memory: 2048M devices: - capabilities: [\"gpu\"] ``` ```typescript const service = new defang.DefangService(\"gpu-service\", { deploy: { replicas: 3, resources: { reservations: { cpu: 1.0, memory: 2048, devices: [{ capabilities: [\"gpu\"] }], }, }, }, }); ``` :::info[GPUs] If you require access to GPUs, you can specify this in the `deploy.resources.reservations.devices[0].capabilities` section of your service as in the examples above. You can learn more about this in the [Docker-Compose documentation](https://docs.docker.com/compose/gpu-support/). This is the only supported value in the `deploy.resources.reservations.devices` section. 
:::", "path": "/docs/concepts/resources" }, { - "id": 22, + "id": 52, "about": "Build-time Resources", - "text": "You can configure the memory requirements and disk space requirements for your image builds by using the `shm_size` property of your service's [`build` specification](https://github.com/compose-spec/compose-spec/blob/main/build.md). For example, ```yaml services: my_service: build: context: . dockerfile: Dockerfile shm_size: 2G ``` :::info Defang uses `shm_size` to configure both the memory and disk space available to your build process. ::: The default `shm_size` values for each platform are as follows. More or less may be specified. | Platform | `shm_size` Minimum | |---------------|--------------------| | AWS | 16G | | Digital Ocean | 8G | | GCP | 16G |", + "text": "You can configure the memory requirements and disk space requirements for your image builds by using the `shm_size` property of your service's [`build` specification](https://github.com/compose-spec/compose-spec/blob/main/build.md). For example, ```yaml services: my_service: build: context: . dockerfile: Dockerfile shm_size: 2G ``` :::info Defang uses `shm_size` to configure both the memory and disk space available to your build process. ::: The default `shm_size` values for each platform are as follows. More or less may be specified. | Platform | `shm_size` Minimum | | ------------- | ------------------ | | AWS | 16G | | Digital Ocean | 8G | | GCP | 16G |", "path": "/docs/concepts/resources" }, { - "id": 23, + "id": 53, "about": "Deployment, Deploying Updates, Zero Downtime Deployments, Deployment Modes, Instance Types", - "text": "When you deploy using Defang, whether it's with `defang compose up` with a [Compose file](./compose.md) or using a [Pulumi program](./pulumi.md), Defang will build your services in the cloud and manage the deployment process for you. 
If you provide a Dockerfile and build context, Defang will upload the files found within the build context to the cloud (either yours in [Defang BYOC](./defang-byoc.md) or ours in [Defang Playground](./defang-playground.md)), build the image, and store it in the cloud provider's container registry. When you run a deployment to update one or more [services](/docs/concepts/services), Defang will build new images for your services, and provision new resources to replace your existing services. Defang can deploy your services using different [modes](/docs/concepts/deployment-modes). When using the `production` mode, Defang will make sure the new replacement services are healthy before deprovisioning your existing services. By default, using the `development` mode, Defang will deprovision your existing services before provisioning replacements. This helps reduce costs. :::info In [Defang BYOC](./defang-byoc.md), Defang uses your cloud provider account to build and store your images. In [Defang Playground](./defang-playground.md), we build and store your images for you. ::: As mentioned above, Defang offers different [deployment modes](/docs/concepts/deployment-modes): `development`, `staging`, and `production`. You can switch the modes using the `--mode` CLI flag. :::warning Workloads with GPUs do not support zero downtime deployments. If you have a workload with a GPU, you will experience downtime during updates. ::: Defang defaults to \"spot\" instances. This is a cost-effective way to run your workloads, but it does mean that your workloads can be interrupted at any time. This is consistent with the [12 Factor](https://12factor.net/) principle of [disposability](https://12factor.net/disposability). :::info In the future, we may provide a way to use \"on-demand\" instances for workloads that require more stability. 
:::", + "text": "When you deploy using Defang, whether it's with `defang compose up` with a [Compose file](./compose.md) or using a [Pulumi program](./pulumi.md), Defang will build your services in the cloud and manage the deployment process for you. If you provide a Dockerfile and build context, Defang will upload the files found within the build context to the cloud (either yours in [Defang BYOC](./defang-byoc.md) or ours in [Defang Playground](./defang-playground.md)), build the image, and store it in the cloud provider's container registry. When you run a deployment to update one or more [services](/docs/concepts/services), Defang will build new images for your services, and provision new resources to replace your existing services. Defang can deploy your services using different [modes](/docs/concepts/deployment-modes). When using the `high_availability` mode, Defang will make sure the new replacement services are healthy before deprovisioning your existing services. By default, using the `affordable` mode, Defang will deprovision your existing services before provisioning replacements. This helps reduce costs. :::info In [Defang BYOC](./defang-byoc.md), Defang uses your cloud provider account to build and store your images. In [Defang Playground](./defang-playground.md), we build and store your images for you. ::: As mentioned above, Defang offers different [deployment modes](/docs/concepts/deployment-modes): `affordable`, `balanced`, and `high_availability`. You can switch the modes using the `--mode` CLI flag. :::warning Workloads with GPUs do not support zero downtime deployments. If you have a workload with a GPU, you will experience downtime during updates. ::: Defang defaults to \"spot\" instances. This is a cost-effective way to run your workloads, but it does mean that your workloads can be interrupted at any time. This is consistent with the [12 Factor](https://12factor.net/) principle of [disposability](https://12factor.net/disposability). 
:::info In the future, we may provide a way to use \"on-demand\" instances for workloads that require more stability. :::", "path": "/docs/concepts/deployments" }, { - "id": 24, + "id": 54, + "about": "Model Context Protocol (MCP) Server, Installation, Supported IDEs, Cursor, Windsurf, VS Code, VS Code Insiders, Claude Desktop, MCP Tools, `deploy`, `services`, `destroy`, Example Prompts", + "text": "The Defang [Model Context Protocol (MCP) Server](https://github.com/DefangLabs/defang/tree/main/src/pkg/mcp) includes built-in tools to allow users to deploy and manage cloud services through a supported IDE. Using this MCP Server with an IDE will enable the AI coding agent (e.g. Copilot) to use Defang tools and resources to perform tasks, such as deploying a service to the cloud. This means you can now use Defang with IDE-integrated AI coding agents. ![Defang MCP Server Diagram](/img/mcp-concept/diagram.png) For more details about MCP architecture, visit the [official MCP documentation](https://modelcontextprotocol.io/introduction). :::info This page is a guide to the Defang MCP Server detailing its installation, tools, and usage. If you are looking for an example of how you can deploy a MCP project with Defang, please instead refer to our [MCP sample application](https://github.com/DefangLabs/samples/tree/main/samples/mcp). ::: Ensure that you have the [npm package manager](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) installed, as `npx` commands are required for setup. :::warning At this time, the Defang MCP Server can only be installed using `npx`. Other methods are not yet supported. ::: Run the setup command in your terminal for your IDE of choice from the [Supported IDEs](#supported-ides) section. This will connect the Defang MCP Server to your IDE. The general format of the command is as follows: ```bash npx -y defang@latest mcp setup --client= ``` Once the command completes, you may need to restart your IDE for the changes to take effect. 
Once the MCP Server is running, you can access the Defang MCP tools directly through the AI agent chat in your IDE. That's it! Feel free to explore our [Example Prompts](#example-prompts) to get ideas on how to interact with the AI agent and make the most of the Defang MCP Server. Setup command: ```bash npx -y defang@latest mcp setup --client=cursor ``` Once setup is complete, you can interact with the AI coding agent using Defang-related actions like `check defang services` or [other prompts](#example-prompts). Here's an example of what it could look like: ![Cursor](/img/mcp-concept/cursor.png) Setup command: ```bash npx -y defang@latest mcp setup --client=windsurf ``` Once setup is complete, you can interact with the AI coding agent using Defang-related actions like `check defang services` or [other prompts](#example-prompts). Here's an example of what it could look like: ![Windsurf](/img/mcp-concept/windsurf.png) Setup command: ```bash npx -y defang@latest mcp setup --client=vscode ``` Once setup is complete, you can interact with the AI coding agent using Defang-related actions like `check defang services` or [other prompts](#example-prompts). Here's an example of what it could look like: ![VS Code](/img/mcp-concept/vscode.png) Setup command: ```bash npx -y defang@latest mcp setup --client=vscode-insiders ``` Once setup is complete, you can interact with the AI coding agent using Defang-related actions like `check defang services` or [other prompts](#example-prompts). Here's an example of what it could look like: ![VS Code](/img/mcp-concept/vscode-insiders.png) While this is not an IDE in the traditional sense, it can support MCP servers. For a smoother experience, consider specifying a project name or directory when making chat prompts to this platform. 
Setup command: ```bash npx -y defang@latest mcp setup --client=claude ``` Once setup is complete, you can interact with the AI coding agent using Defang-related actions like `check defang services` or [other prompts](#example-prompts). Here's an example of what it could look like: ![Claude Desktop](/img/mcp-concept/claude.png) Below are the tools available in the Defang MCP Server. The `deploy` tool scans your project directory for Dockerfiles and `compose.yaml` files, then deploys the detected service(s) using Defang. You can monitor the deployment process in the Defang Portal. :::info The Defang MCP Server currently supports deployments to [Defang Playground](/docs/providers/playground). We plan to support BYOC in future updates. ::: The `services` tool displays the details of all your services that are currently deployed in your project with Defang. It shows the Service Name, Deployment ID, Public URL and Service Status. If there are no services found, it will display an appropriate message. Given a project name or directory, the `destroy` tool identifies any services deployed with Defang and terminates them. If no services are found, it will display an appropriate message. After connecting the Defang MCP Server to your IDE using an installation method, you can type in prompts in your chat to invoke the AI agent to use any MCP tool(s). For example, you can ask the AI agent: ``` can you deploy this to defang? ``` ``` please destroy this project. ``` ``` what services do I have? 
``` You can also choose to specify a project name or project directory if you do not have the project open: ``` deploy this with defang /Users/yourname/Documents/project1 ``` ``` do I have a service called project1 ``` Feel free to try any of these prompts or create your own!", + "path": "/docs/concepts/mcp" + }, + { + "id": 55, "about": "Pulumi, Install the Defang Pulumi Provider, When to Use the Defang Pulumi Provider, Example, API, `DefangService`, `DefangServiceArgs`, `Platform`, `Protocol`, `DeviceCapability`, `NetworkName`, `Network`", "text": ":::warning Pulumi support is currently only available for Defang Playground. We are working on support for Defang BYOC. ::: [Pulumi](https://www.pulumi.com) is a modern infrastructure-as-code toolkit that allows developers to use a programming language like Typescript to provision and manage cloud resources. Defang provides a Pulumi [_Provider_](https://www.pulumi.com/docs/iac/concepts/resources/providers/) written in Typescript which can be used to deploy Defang services alongside other Pulumi-managed infrastructure. To get started with Pulumi and Defang you will need to install the Defang provider in [your Pulumi project](https://www.pulumi.com/learn/pulumi-fundamentals/create-a-pulumi-project/): ```bash npm i @defang-io/pulumi-defang ``` ```bash pnpm i @defang-io/pulumi-defang ``` ```bash yarn add @defang-io/pulumi-defang ``` The Defang Pulumi Provider is a good option for developers with more complex requirements than those supported by a [Compose File](/docs/concepts/compose). One reason to use the Defang Pulumi provider is if you wish to integrate your services with other cloud resources. This is particularly true if you need to configure your services dynamically as other cloud resources are being provisioned. Another reason would be if you want to deploy your services alongside cloud-specific resources, like a DynamoDB table, or an S3 bucket. 
The following is a minimal example of a Pulumi program that defines a Defang service: ```typescript import * as defang from \"@defang-io/pulumi-defang/lib\"; const service = new defang.DefangService(\"my-service\", { image: \"strm/helloworld-http:latest\", ports: [{ target: 80, mode: \"ingress\", protocol: \"http\", }], }); ``` :::info See the [Deploy using Pulumi](/docs/tutorials/deploy-using-pulumi) tutorial for more information about how to use it. ::: ```typescript constructor( name: string, args: DefangServiceArgs, opts?: pulumi.CustomResourceOptions ) ``` ```typescript interface DefangServiceArgs { /** the DNS name of the Defang Fabric service; defaults to the value of DEFANG_FABRIC or prod, if unset */ fabricDNS?: pulumi.Input; /** the name of the service; defaults to the name of the resource */ name?: pulumi.Input; /** the container image to deploy; required when no build configuration was provided */ image?: pulumi.Input; /** the platform to deploy to; defaults to \"linux/amd64\" */ platform?: pulumi.Input; /** which network the service is in, ie. 
whether the service requires a public IP or not; defaults to \"private\" (was: internal=true) */ networks?: { [key in NetworkName]?: Network }; /** the optional deployment configuration */ deploy?: pulumi.Input; /** the ports to expose */ ports?: pulumi.Input[]>; /** the environment variables to set; use `null` to mark at sensitive */ environment?: pulumi.Input<{ [key: string]: pulumi.Input | null }>; /** the secrets to expose as environment variables @deprecated use environment key with value `null` */ secrets?: pulumi.Input[]>; /** force deployment of the service even if nothing has changed */ forceNewDeployment?: pulumi.Input; /** the command to run; overrides the container image's CMD */ command?: pulumi.Input[]>; /** the optional build configuration; required when no image was provided */ build?: pulumi.Input; /** the optional health-check test for the service */ healthcheck?: pulumi.Input; /** the optional fully qualified domain name for the service; requires CNAME to the publicFqdn */ domainname?: pulumi.Input; /** experimental: mark this service as (managed) Redis */ x_redis?: pulumi.Input; /** experimental: mark this service as serving static files */ x_static_files?: pulumi.Input; /** if true, this provider will wait for the service to reach a steady state before continuing */ waitForSteadyState?: pulumi.Input; /** the project to deploy the service to */ project?: pulumi.Input; } ``` ```typescript type Platform = \"linux/arm64\" | \"linux/amd64\" | \"linux\"; ``` ```typescript type Protocol = \"tcp\" | \"udp\" | \"http\" | \"http2\" | \"grpc\"; ``` ```typescript type DeviceCapability = \"gpu\"; ``` ```typescript type NetworkName = \"private\" | \"public\"; ``` ```typescript type Network = { aliases?: string[] } | null; ```", "path": "/docs/concepts/pulumi" }, { - "id": 25, - "about": "Networking, Internal Communication, Sample Configuration, Internal DNS", - "text": "Defang configures Security Groups, deploys applications to a private subnet and uses an 
Application Load Balancer to route traffic to your services from the public internet only when required. :::tip This page is about internal networking only. If you want to configure your services to be accessible from the public internet, check the [Domains page](./domains.mdx). ::: You can expose ports in your service definition to allow other services to communicate with it. Similar to public communication, you can use the `ports` section of your service definition, but set the `mode` to `host` instead of `ingress` to allow other services to communicate with it through the internal network. ```yaml services: # [...] service1: ports: - mode: host target: 3000 app_protocol: http ``` ```typescript const service = new defang.DefangService(\"service1\", { // [...] ports: [{ target: 3000, mode: \"host\", protocol: \"http\", }], }); ``` Internal communication is handled slightly differently between the Defang Playground and Defang BYOC. Internal communication between services in the Defang Playground follows the following pattern: ``` http://-: ``` Internal communication between services in Defang BYOC follows the following pattern: ``` http://: ``` ", + "id": 56, + "about": "Networking, Networks, Public Services, Private Services, Hostname Aliases, Internal DNS", + "text": "By default, Defang configures your application's networking and security groups to follow secure best practices. We also configure load-balancers and public IP addresses when appropriate. The following sections describe how to configure different network and security group topologies. :::tip This page is about complex networking. If you want to configure your services to be accessible from the public internet, check the [Domains page](./domains.mdx). ::: The Compose spec has a notion of [networks](https://github.com/compose-spec/compose-spec/blob/main/06-networks.md). By default, each service gets added to the `default` network. Services in the `default` network can have public IPs. 
Services in any other network will be in a private subnet. These services cannot be reached from outside the network, but they can still make network requests to the public internet. ```yaml services: frontend: build: ./fe ports: - 80 # load-balanced, ie. mode: ingress networks: default: private: backend: build: ./be ports: - mode: host # no load balancer target: 8080 networks: private: networks: default: private: internal: true # no egress ``` By default, services will be in the `default` network. By default these services are not accessible directly, but may be accessed through a public load-balancer, ie. exposed ports default to `mode: ingress`: ```yaml services: web: networks: default: # this is the default, so no need to specify ports: - 80:80 # Defang will use a public load-balancer ``` If you want a service to have a public IP address, ensure it's in the `default` network (the default) and set the port to `mode: host`: ```yaml services: web: ports: - target: 80 mode: host # Defang will assign a public IP ``` If you want a service with exposed ports to not be accessible from the public internet, create a private network: ```yaml services: web: # this service can receive public traffic and communicate to private services ports: - 80 networks: default: private: db: # this service can only receive traffic from other services in the same network ports: - 1234 networks: private: networks: private: # any network that's not \"default\" is considered private ``` The service's hostname will be the same as the service's name, in this case `db`. By using network aliases, a service can be made available at multiple hostnames. ```yaml services: web: domainname: example.com networks: default: aliases: - www.example.com # a public alias ``` Internal communication is handled slightly differently between the Defang Playground and Defang BYOC. 
Internal communication between services in the Defang Playground follows the following pattern: ``` http://-: ``` The Defang CLI applies the `` prefix when it detects service names in the values of environment variables. Internal communication between services in Defang BYOC follows the following pattern: ``` http://: ``` ", "path": "/docs/concepts/networking" }, { - "id": 26, + "id": 57, + "about": "Local Development, `compose.local.yaml`", + "text": "While Defang encourages using a single compose file, there are times when it is more convenient to use different files for different environments. For example, if some services should be configured differently locally: maybe you want to mount your working directory as a volume for live-reloading, or you want to build a different container image for local development. Another reason may be that you have some local services, like an auth mock or a local webhook listener that you don't want to deploy to the cloud. In these cases, Defang recommends creating a file like `compose.local.yaml` which extends service definitions from your primary `compose.yaml` file. For example, ```yaml", + "path": "/docs/concepts/local-development" + }, + { + "id": 58, + "about": "compose.yaml", + "text": "services: app: build: context: . dockerfile: Dockerfile ``` ```yaml", + "path": "/docs/concepts/local-development" + }, + { + "id": 59, + "about": "compose.local.yaml", + "text": "services: app: extends: file: compose.yaml service: app build: context: . 
dockerfile: local.Dockerfile volumes: - .:/web local_service: image: myservice:latest ``` This configuration can then be launched locally with ``` docker compose -f compose.local.yaml ``` And the primary compose file can be deployed to the cloud with ``` defang compose up ``` See our [blog post on Dev Containers and Codespaces](/blog/2024/03/20/dev-environments) for more information on getting set up in those environments.", + "path": "/docs/concepts/local-development" + }, + { + "id": 60, "about": "Domains, Defang-hosted Domains, Structure, Sample Configuration, Bring Your Own Domain, BYOC or Defang Playground, BYOC, Sample Configuration, Current Support", "text": "Defang helps you provision SSL certificates and expose your services to the internet. You can expose your services using Defang-hosted domains or bring your own custom domain with [Defang BYOC](./defang-byoc.md). This page explains how to configure your services to be accessible from the public internet. To make your service accessible from the public internet with Defang-hosted domains (under `defang.app`), you simply expose ports in your service definition. Defang-hosted domains use a structure that is based on your username and service name. If you have a publicly accessible service, you will automatically receive a domain to access it. If you're deploying to the [Defang Playground](./defang-playground.md), your domain will be structured as follows: ``` https://---.defang.dev ``` If you're using [Defang BYOC](./defang-byoc.md), your domain will be: ``` https://--...defang.app ``` ```yaml services: # [...] serviceName: ports: // highlight-start - mode: ingress target: 3000 protocol: http // highlight-end ``` :::warning As of now, the Pulumi provider does not support [Defang BYOC](./defang-byoc.md). ::: ```typescript const service = new defang.DefangService(\"serviceName\", { // [...] 
ports: [{ target: 3000, mode: \"ingress\", protocol: \"http\", }], }); ``` You can also bring your own domain to a Defang project. This allows you to use your own domain name to access your services, and varies a little bit depending on your setup. If you are using [Defang BYOC](./defang-byoc.md) and *do not* have your domain's DNS hosted with your cloud provider, or if you are using the [Defang Playground](./defang-playground.md), you will need to follow this flow: 1. Add the `domainname` to your service definition. 2. Run `defang compose up` to deploy your project. 3. Run `defang cert generate` to generate an SSL certificate. This command will provide instructions for where to point your domain's DNS before the certificate can be issued. If you are using [Defang BYOC](./defang-byoc.md) *and the DNS for your domain is hosted with your cloud provider* (i.e. a Hosted Zone in Route 53, if you're using AWS), all you need to do is specify the `domainname` in your service definition, as in the example below. :::warning For the time being, you can only use one domain per service. If you need to use multiple domains, please vote on this [issue](https://github.com/DefangLabs/defang/issues/247). ::: ```yaml services: # [...] serviceName: // highlight-next-line domainname: example.com ports: // highlight-start - mode: ingress target: 3000 protocol: http // highlight-end ``` :::warning As of now, the Pulumi provider does not support [Defang BYOC](./defang-byoc.md) and the [Defang Playground](./defang-playground.md) does not support bringing your own domain, since it is not for production use. 
::: | Provider | BYOD | | --- | --- | | [Playground](/docs/providers/playground) | ✅ no apex domains | | [AWS](/docs/providers/aws) | ✅ | | [DigitalOcean](/docs/providers/digitalocean) | ❌ | | [GCP](/docs/providers/gcp) | ✅ |", "path": "/docs/concepts/domains" }, { - "id": 27, + "id": 61, "about": "Deployment Modes, Deployment Mode Comparison", - "text": "Defang provides three deployment modes: development, staging, and production. These modes allow you to balance cost and resiliency according to your needs. * **Development**: This mode is used for development and testing purposes. It typically involves less stringent resource allocations and may include debugging tools and verbose logging to aid in development. * **Staging**: This mode serves as a pre-production environment where applications are tested in conditions that closely mimic production. It helps in identifying issues that might not be apparent in the development environment. * **Production**: This mode is used for live deployments. It involves optimized configurations for performance, security, and reliability. Resource allocations are typically higher, and debugging tools are minimized to ensure stability. | Feature | Development | Staging | Production | |-|-|-|-| | Build Resources | Builds will be run with 2x vCPUs | Builds will be run with 2x vCPUs | Builds will be run with 4x vCPUs | | Compute | Using spot instances | (like development) | On-demand instances | | Databases | Defang will provision resources optimized for burstable memory | (like production) | Defang will provision resources optimized for production | | Deployment | Previous deployments will be spun down before new deployments are spun up. | (like production) | Rolling updates will be used to deploy new versions. Defang will gradually replace services while maintaining at least [the original number of replicas](/docs/tutorials/scaling-your-services). | | Logging | Logs retained for 1 day to save costs. 
| | Logs retained for 30 days for compliance. | | Networking | | (like production) | Defang will provision a NAT gateway. | | Load Balancing | HTTP redirect to HTTPS using `302 Found` | | Termination Protection will be enabled; logs are retained on \"down\" | | DNS | Defang will provision shorter TTLs; zones will be forcefully destroyed | | Defang will provision longer TTLs; records can be overwritten for ZDT | | Managed storage | Operations that cause downtime are allowed | | Encryption at rest; Final snapshot created on \"down\" | | Logs | 1 day retention | 7 days retention | 30 days retention |", + "text": "Defang provides three deployment modes: `affordable`, `balanced`, and `high_availability`. These modes allow you to balance cost and resiliency according to your needs. * **Affordable**: This mode is used for development and testing purposes. It typically involves less stringent resource allocations and may include debugging tools and verbose logging to aid in development. * **Balanced**: This mode serves as a pre-production environment where applications are tested in conditions that closely mimic production. It helps in identifying issues that might not be apparent in the development environment. * **High Availability**: This mode is used for live deployments. It involves optimized configurations for performance, security, and reliability. Resource allocations are typically higher, and debugging tools are minimized to ensure stability. | Feature | Affordable | Balanced | High Availability | |-|-|-|-| | Build Resources | Builds will be run with 2x vCPUs | Builds will be run with 2x vCPUs | Builds will be run with 4x vCPUs | | Compute | Using spot instances | (like `affordable`) | On-demand instances | | Databases | Defang will provision resources optimized for burstable memory | (like `high_availability`) | Defang will provision resources optimized for production | | Deployment | Previous deployments will be spun down before new deployments are spun up. 
| (like `high_availability`) | Rolling updates will be used to deploy new versions. Defang will gradually replace services while maintaining at least [the original number of replicas](/docs/tutorials/scaling-your-services). | | Logging | Logs retained for 1 day to save costs. | | Logs retained for 30 days for compliance. | | Networking | | (like `high_availability`) | Defang will provision a NAT gateway. | | Load Balancing | HTTP redirect to HTTPS using `302 Found` | | Termination Protection will be enabled; logs are retained on \"down\" | | DNS | Defang will provision shorter TTLs; zones will be forcefully destroyed | | Defang will provision longer TTLs; records can be overwritten for ZDT | | Managed storage | Operations that cause downtime are allowed | | Encryption at rest; Final snapshot created on \"down\" | | Logs | 1 day retention | 7 days retention | 30 days retention |", "path": "/docs/concepts/deployment-modes" }, { - "id": 28, + "id": 62, "about": "Projects, Project Names", "text": "A _project_ refers to a cohesive collection of services which are defined and managed using a [`compose.yaml` file](./compose.md). The _project name_ can be defined in the Compose file with the [`name` property](https://docs.docker.com/compose/compose-file/04-version-and-name/#name-top-level-element), otherwise the base name of the project directory will be used. The project name may then be used when performing project-wide operations such as listing services, tailing logs, or deprovisioning. For example: ``` defang services --project-name defang tail --project-name defang compose down --project-name ``` By default `defang` will use the project name from the `compose.yaml` file in the working directory. 
The [`--project-name` CLI flag](/docs/cli/defang) may be used when running `defang` from outside the working directory.", "path": "/docs/concepts/projects" }, { - "id": 29, + "id": 63, "about": "Portal", "text": "You can log into the Defang Portal at [portal.defang.dev](https://portal.defang.dev) using your [Defang account](./accounts.md) to view the state of your Defang services running in the Defang Playground for non-production applications. You can use the Portal to get an overview of your [services](/docs/concepts/services), view the logs for each service, quickly access exposed ports, view environment variables, and more. :::info The Defang Portal displays services deployed to Defang Playground. To view services deployed to Defang BYOC, please check out [Monitoring Your Services](/docs/tutorials/monitoring-your-services). ::: :::tip Need help with a failing deployment? Defang provides a tool to help [debug](/docs/concepts/debug) in your application. :::", "path": "/docs/concepts/portal" }, { - "id": 30, + "id": 64, "about": "Generate, Example Prompts", "text": "Defang includes an AI-driven agent that translates natural language prompts to an outline for your project that you can then refine. The AI agent is available through the [Defang CLI](../getting-started#install-the-defang-cli.md). We are working on expanding the range of supported prompts and improving the accuracy of the AI agent. If you have any feedback or suggestions, please let us know by [opening an issue](https://github.com/DefangLabs/defang/issues/new). :::info This page is about Defang's AI tool for project outline generation. We also have an AI tool for [debugging](/docs/concepts/debug). ::: Here are some example prompts that the AI agent can help you with: ``` A basic service with 2 REST endpoints. The default endpoint will be for health check and should return a JSON object like this: { \"status\": \"OK\" }. The /echo endpoint will echo back all request parameters in the response. 
``` ``` A simple service that runs in the cloud and serves a web page ``` ``` A simple flask app with one route that responds with a json object ``` ``` A simple next.js app that serves a static page ``` ``` A simple api with a single route built on express ``` ``` An HTTP GET endpoint that returns an HTML with a random meme from the imgflip API ``` **An example of a more complex prompt** ``` A service that uses bullmq and redis. The compose file should also include a redis service. The code should set up a queue. It should also expose three endpoints with express: one to add a job to the queue, another to check how many tasks are in the queue, and a last one that runs on / which is just a healthcheck. The code should also include a worker that processes the queue by logging what's in each task. Use environment variables to configure the redis connection. No password. ```", "path": "/docs/concepts/generate" }, { - "id": 31, + "id": 65, "about": "Accounts, Creating an Account, Structure", - "text": "Below, we will cover account creation with Defang, and how resources are organized within a cloud environment using your account info. Defang requires an account so we can organize your resources and provide you with a way to manage them. We also use your account to authenticate you when you interact with [Defang Playground](./defang-playground.md) and the [Defang Portal](./portal.md). Billing and payment information is also associated with your account. To create an account, log in to Defang and accept the [Terms of Service](https://defang.io/policies/terms-service/) using the [CLI](/docs/concepts/authentication.md). At the moment, the only way to authenticate with Defang is through GitHub. We plan to offer other [authentication](/docs/concepts/authentication.md) providers in the future. :::tip Keep in mind that your Defang account is separate from your [cloud provider account](./defang-byoc.md). 
You will need to authenticate with your cloud provider account separately to deploy services to your own cloud account. ::: Your GitHub username will be used as your Defang username. Your Defang username is used to group all your [services](/docs/concepts/services) and generate domains for your services with the following structure: ``` ---.prod1.defang.dev ``` For more about domain structures, see our [Domains](/docs/concepts/domains#structure) page.", + "text": "Below, we will cover account creation with Defang, and how resources are organized within a cloud environment using your account info. Defang requires an account so we can organize your resources and provide you with a way to manage them. We also use your account to authenticate you when you interact with [Defang Playground](./defang-playground.md) and the [Defang Portal](./portal.md). Billing and payment information is also associated with your account. To create an account, log in to Defang and accept the [Terms of Service](https://defang.io/policies/terms-service/) using the [CLI](/docs/concepts/authentication.md). ``` $ defang login ``` At the moment, the only way to authenticate with Defang is through GitHub. We plan to offer other [authentication](/docs/concepts/authentication.md) providers in the future. :::tip Keep in mind that your Defang account is separate from your [cloud provider account](./defang-byoc.md). You will need to authenticate with your cloud provider account separately to deploy services to your own cloud account. ::: Your GitHub username will be used as your Defang username. 
Your Defang username is used to group all your [services](/docs/concepts/services) and generate domains for your services with the following structure: ``` ---.prod1.defang.dev ``` For more about domain structures, see our [Domains](/docs/concepts/domains#structure) page.", "path": "/docs/concepts/accounts" }, { - "id": 32, + "id": 66, "about": "Compose, How It Works, Example of a Compose File, Compose Top-level Properties, `services`, `networks`, `volumes`", - "text": "Defang allows you to use `compose.yaml` files to deploy your application to the cloud. The `compose.yaml` file is a simple way to define and run multi-container applications. This file format may look familiar to you if you've used [Docker](https://docker.com). The [Compose Specification](https://github.com/compose-spec/compose-spec/blob/main/spec.md#compose-file) lets you define a platform-agnostic application designed as a set of containers which are configured to run together with shared resources. These applications may be destined for any [OCI](https://opencontainers.org/) Container Runtime. Defang does the heavy lifting to deploy to your favourite cloud platform using this file. You can create a `compose.yaml` file in the root of your project, or use the [`defang generate`](../tutorials/generate-new-code-using-ai.mdx) command to create one for you (along with other resources). This file is used to define your application's [services](./services.md) and how they run. You can edit this file to add more services or change the configuration of services. When you run `defang compose up`, Defang will read your `compose.yaml` file and [deploy](./deployments.md) the services named in that file to the cloud. Here is a basic `compose.yaml` file that contains all the required properties for deployment in Defang. ```yaml services: service-example: image: nginx:latest # use one of: image (shown on this line) or build (shown below) # build: # context: . 
# dockerfile: Dockerfile ports: - mode: ingress # specify ports to expose target: 8080 published: 8080 # this is useful for running locally ``` Here are a list of top-level properties of the [Compose specification](https://docs.docker.com/compose/compose-file/) that Defang supports when writing a `compose.yaml` file. (Required) The services defined in your application. ```yaml services: service: # add service-level properties here ``` :::info Defang identifies a service based on your username, project name, and the service name you've defined under the `services` property. See our [Services](/docs/concepts/services) page for more about how Defang resolves service names. ::: (Optional) The networks defined in your application. This is commonly added together with a [service-level `networks`](#networks-1) property. ```yaml networks: public: ``` See our [Networking](/docs/concepts/networking) page for more. (Not yet supported) The volume mounts for a container, reusable across services. This feature is not currently supported by Defang. ```yaml", - "path": "/docs/concepts/compose" - }, - { - "id": 33, - "about": "db-data:, Compose Service-level Properties, `build`, `image`, `ports`, `command`, `deploy`, `depends_on`", - "text": "``` :::warning Defang does not support the `secrets` top-level property. Please read our [Configuration](/docs/concepts/configuration) page for more. ::: Here are a list of service-level properties of the [Compose specification](https://docs.docker.com/compose/compose-file/) that Defang supports when writing a `compose.yaml` file. :::tip Service-level means inside your `service`. A service-level property called `build` would look like: ```yaml service: build: ... ``` Note that in your Compose file, you will need a top-level property called `services` to contain all of your services. For example: ```yaml services: service: build: ... 
``` ::: (Required, unless `image` is defined) The [build configuration](https://github.com/compose-spec/compose-spec/blob/main/build.md). This property describes how to create an OCI container for this service. ```yaml build: context: . dockerfile: ./Dockerfile ``` (Required, unless `build` is defined) [This property](https://github.com/compose-spec/compose-spec/blob/main/05-services.md#image) describes the image from which your container should start. ```yaml image: nginx:latest ``` (Optional, but required if you want to access the service from outside the container) The ports to expose. The default port mode is `ingress`. ```yaml ports: - mode: ingress target: 80 published: 80 ``` :::info Defang ignores `published` ports in production. As such, it is common to make `target` and `published` ports the same when using Defang. However, it can be useful to include a `published` port for local development, such as Docker. ::: (Optional) The command which will be run to start your service. If left out, the command from the Docker image will be used. ```yaml command: nginx -g 'daemon off;' ``` (Optional) The [Deploy Specification](https://github.com/compose-spec/compose-spec/blob/main/deploy.md) describes the runtime constraints and requirements for how your services will be deployed and managed across different environments (e.g. memory reservations, replicas, number of CPUs, etc.). ```yaml deploy: replicas: 1 reservations: cpus: '0.5' memory: 256M ``` (Not yet supported) This property describes startup dependencies between services. This feature is currently unsupported by Defang, but can be useful in local developments such as Docker. ```yaml", + "text": "Defang allows you to use `compose.yaml` files to deploy your application to the cloud. The `compose.yaml` file is a simple way to define and run multi-container applications. This file format may look familiar to you if you've used [Docker](https://docker.com). 
The [Compose Specification](https://github.com/compose-spec/compose-spec/blob/main/spec.md#compose-file) lets you define a platform-agnostic application designed as a set of containers which are configured to run together with shared resources. These applications may be destined for any [OCI](https://opencontainers.org/) Container Runtime. Defang does the heavy lifting to deploy to your favourite cloud platform using this file. You can create a `compose.yaml` file in the root of your project, or use the [`defang generate`](../tutorials/generate-new-code-using-ai.mdx) command to create one for you (along with other resources). This file is used to define your application's [services](./services.md) and how they run. You can edit this file to add more services or change the configuration of services. When you run `defang compose up`, Defang will read your `compose.yaml` file and [deploy](./deployments.md) the services named in that file to the cloud. Here is a basic `compose.yaml` file that contains all the required properties for deployment in Defang. ```yaml services: service-example: image: nginx:latest # use one of: image (shown on this line) or build (shown below) # build: # context: . # dockerfile: Dockerfile ports: - mode: ingress # specify ports to expose target: 8080 published: 8080 # this is useful for running locally ``` Here are a list of top-level properties of the [Compose specification](https://docs.docker.com/compose/compose-file/) that Defang supports when writing a `compose.yaml` file. (Required) The services defined in your application. ```yaml services: service: # add service-level properties here ``` :::info Defang identifies a service based on your username, project name, and the service name you've defined under the `services` property. See our [Services](/docs/concepts/services) page for more about how Defang resolves service names. ::: (Optional) The networks defined in your application. 
This is commonly added together with a [service-level `networks`](#networks-1) property. ```yaml networks: default: # the \"default\" network is always created even if not specified ``` See our [Networking](/docs/concepts/networking) page for more. (Not yet supported) The volume mounts for a container, reusable across services. This feature is not currently supported by Defang. ```yaml", "path": "/docs/concepts/compose" }, { - "id": 34, - "about": "- db, `environment`, `healthcheck`, `networks`, `restart`, `volumes`", - "text": "``` (Optional) The environment variables to set. ```yaml environment: DATABASE_USER: someuser ``` :::info For sensitive environment variables (or secret values), you should list the variable's name with a blank or `null` value, and then securely set their actual value with `defang config` in the CLI. See our [Configuration page](/docs/concepts/configuration) for more. For example: ```yaml - DATABASE_USER=someuser # env var loaded with this literal value - DATABASE_PASSWORD # env var loaded using defang config ``` ::: (Optional, but required for healthchecks on services with a published port) [This property](https://github.com/compose-spec/compose-spec/blob/main/05-services.md#healthcheck) describes a check that will be run to determine whether or not a service's containers are \"healthy\". It works in the same way, and has the same default values, as the [HEALTHCHECK Dockerfile instruction](https://docs.docker.com/engine/reference/builder/#healthcheck) set by the service's Docker image. Your Compose file can override the values set in the Dockerfile. When using Defang, your Compose file must have a healthcheck if you want to expose an `ingress` port—even if your Dockerfile already contains one. :::note `curl` is commonly used for containers with an Ubuntu-based image, and `wget` is used for containers with an `alpine`-based image. 
::: ```yaml healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:8080/\"] interval: 30s timeout: 90s retries: 3 ``` or ```yaml healthcheck: test: [\"CMD\", \"wget\", \"--spider\", \"http://localhost:8080/\"] interval: 30s timeout: 90s retries: 3 ``` (Optional) The network configuration. Can be `public`, where Defang will assign a public IP address, or `private`, in which Defang will not. To avoid warnings, add this to the [top-level `networks`](#networks) property as well. ```yaml networks: public: ``` You can also assign an alias for a network by using `aliases`, as seen below: ```yaml networks: public: aliases: - app ``` See our [Networking](/docs/concepts/networking) page for more. (Optional, but highly recommended) The restart mode for a container. Defaults to `unless-stopped` unless otherwise specified. ```yaml restart: unless-stopped ``` (Not yet supported) The volume mounts for a container, specific to a service. This feature is not currently supported by Defang. ```yaml", - "path": "/docs/concepts/compose" - }, - { - "id": 35, - "about": "- \"./backend:/app\"", - "text": "```", + "id": 67, + "about": "db-data:, Compose Service-level Properties, `build`, `image`, `ports`, `command`, `deploy`, `depends_on`, `environment`, `healthcheck`, `networks`, `restart`, `volumes`", + "text": "``` :::warning Defang does not support the `secrets` top-level property. Please read our [Configuration](/docs/concepts/configuration) page for more. ::: Here are a list of service-level properties of the [Compose specification](https://docs.docker.com/compose/compose-file/) that Defang supports when writing a `compose.yaml` file. :::tip Service-level means inside your `service`. A service-level property called `build` would look like: ```yaml service: build: … ``` Note that in your Compose file, you will need a top-level property called `services` to contain all of your services. 
For example: ```yaml services: service: build: … ``` ::: (Required, unless `image` is defined) The [build configuration](https://github.com/compose-spec/compose-spec/blob/main/build.md). This property describes how to create an OCI container for this service. ```yaml build: context: . dockerfile: ./Dockerfile ``` (Required, unless `build` is defined) [This property](https://github.com/compose-spec/compose-spec/blob/main/05-services.md#image) describes the image from which your container should start. ```yaml image: nginx:latest ``` (Optional, but required if you want to access the service from outside the container) The ports to expose. The default port mode is `ingress`. ```yaml ports: - mode: ingress target: 80 published: 80 ``` :::info Defang ignores `published` ports in production. As such, it is common to make `target` and `published` ports the same when using Defang. However, it can be useful to include a `published` port for local development, such as Docker. ::: (Optional) The command which will be run to start your service. If left out, the command from the Docker image will be used. ```yaml command: nginx -g 'daemon off;' ``` (Optional) The [Deploy Specification](https://github.com/compose-spec/compose-spec/blob/main/deploy.md) describes the runtime constraints and requirements for how your services will be deployed and managed across different environments (e.g. memory reservations, replicas, number of CPUs, etc.). ```yaml deploy: replicas: 1 reservations: cpus: '0.5' memory: 256M ``` (Limited support) This property describes startup dependencies between services. This feature currently has limited supported by Defang: dependency on a managed service does not wait for the managed service provisioning to complete. ```yaml # depends_on: # - db ``` (Optional) The environment variables to set. 
```yaml environment: DATABASE_USER: someuser ``` :::info For sensitive environment variables (or secret values), you should list the variable's name with a blank or `null` value, and then securely set their actual value with `defang config` in the CLI. See our [Configuration page](/docs/concepts/configuration) for more. For example: ```yaml - DATABASE_USER=someuser # env var loaded with this literal value - DATABASE_PASSWORD # env var loaded using defang config ``` ::: (Optional, but required for healthchecks on services with a published port) [This property](https://github.com/compose-spec/compose-spec/blob/main/05-services.md#healthcheck) describes a check that will be run to determine whether or not a service's containers are \"healthy\". It works in the same way, and has the same default values, as the [HEALTHCHECK Dockerfile instruction](https://docs.docker.com/engine/reference/builder/#healthcheck) set by the service's Docker image. Your Compose file can override the values set in the Dockerfile. When using Defang, your Compose file must have a healthcheck if you want to expose an `ingress` port—even if your Dockerfile already contains one. :::note `curl` is commonly used for containers with an Ubuntu-based image, and `wget` is used for containers with an `alpine`-based image. ::: ```yaml healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:8080/\"] interval: 30s timeout: 90s retries: 3 ``` or ```yaml healthcheck: test: [\"CMD\", \"wget\", \"--spider\", \"http://localhost:8080/\"] interval: 30s timeout: 90s retries: 3 ``` (Optional) The service network configuration. By default, Compose will add services to the `default` network, which has external connectivity. You can also add services to private networks. To avoid warnings, you should add them to the [top-level `networks`](#networks) property as well. 
```yaml networks: default: # when not specified, services are assigned to the \"default\" network ``` You can also assign an alias for a network by using `aliases`, as seen below: ```yaml networks: default: aliases: - app ``` See our [Networking](/docs/concepts/networking) page for more. (Optional, but highly recommended) The restart mode for a container. Defaults to `unless-stopped` unless otherwise specified. ```yaml restart: unless-stopped ``` (Not yet supported) The volume mounts for a container, specific to a service. This feature is not currently supported by Defang. ```yaml # volumes: # - \"./backend:/app\" ```", "path": "/docs/concepts/compose" }, { - "id": 36, + "id": 68, "about": "Configuration", "text": "Defang allows you to configure your application using environment variables. You can set environment variables in your [`compose.yaml` file](./compose.md), or in your [Pulumi program](#using-config-with-pulumi).", "path": "/docs/concepts/configuration" }, { - "id": 37, + "id": 69, "about": "Sensitive Config Values", "text": "The Defang CLI allows you to securely store sensitive information such as API keys, passwords, and other credentials. To do so, run: ```bash", "path": "/docs/concepts/configuration" }, { - "id": 38, + "id": 70, "about": "Set a configuration value called API_KEY, With List Notation, With Map Notation, Interpolation, Using Config with Pulumi, Connecting Services, Providers", "text": "defang config set API_KEY ``` You can use sensitive config by specifying them in the `environment` section of a service in a `compose.yaml` file without any value, or by specifying an environment key with a `null` value in your Pulumi code. Either one of list notation or map notation is acceptable for defining your environment variable(s). See below for an example of each. 
```yaml services: service1: image: image1:latest environment: - API_KEY ``` ```yaml services: service1: image: image1:latest environment: API_KEY: ``` Use the `defang config` command of the Defang CLI to manage the values. :::tip You can find a sample of how to set sensitive config values [here](https://github.com/DefangLabs/samples/tree/main/samples/nodejs-openai). ::: :::info If you are using the [1-Click Deploy](/docs/tutorials/using-one-click-deploy) option, you can set sensitive config values as secrets in your GitHub repository and the action will automatically deploy them for you. [Learn how to manage config values with the Defang Github Action](https://github.com/DefangLabs/defang-github-action?tab=readme-ov-file#managing-config-values). ::: Environment variables are set within the `environment` section of a service in a `compose.yaml` file. Any variables declared here will become available within the service container. Variables can be set by assigning a literal value, a reference to a configuration value, or a mix of literal and variable references. Variable references are declared using either **\\$\\{variable_name\\}** or **$variable_name** forms. It is recommended to use the bracketed form. By interpolating over variable references within a string we can construct complex strings. Interpolation may be particularly useful when constructing connection strings to other services. ``` service: environment: - USER_PASSWORD // configuration variable - USER_NAME // configuration variable - CONNECT=dbservice:${USER_NAME}:${USER_PASSWORD}@example.com:9876 ``` In the example above, if we assume the value of the configuration variable ***USER_PASSWORD*** is *password* then the value assigned to ***CONNECT*** will resolve to *dbservice:alice:password@example.com:9876* During `defang compose up` all variable references will be replaced with the actual value and made available in the container. 
If any referenced variable is not found, the `defang compose up` command will be canceled.
Scaling enables services to respond effectively under different conditions: - **High Traffic**: When demand spikes, scaling up ensures your service can process more requests in parallel. - **Cost Optimization**: Scaling down during periods of low demand helps reduce unnecessary resource usage and cloud costs. - **Fault Tolerance**: Multiple instances of a service provide redundancy in case of instance failure. - **Throughput & Latency**: Additional instances can reduce response times and increase the number of operations your service can perform per second. There are two main ways to scale a service: - **Horizontal Scaling**: Adds or removes instances of a service. This is the most common approach for stateless services. - **Vertical Scaling**: Increases or decreases the resources (CPU, memory) available to a single instance. In most modern deployments, horizontal scaling is preferred because it aligns well with cloud-native principles and is easier to automate and distribute. **Auto-scaling** refers to automatically adjusting the number of service instances based on defined policies or metrics. Instead of manually adding more instances when traffic increases, an auto-scaling system watches key indicators (like CPU usage) and takes action in real time. With Defang, users on the Pro plan or higher can enable service-level autoscaling in three steps: 1. Add the _**x-defang-autoscaling : true**_ extension to the service you want to autoscale. 2. Remove any _**replicas**_ field in the _**deploy**_ mapping (if present). 3. Deploy using staging or production [mode](/docs/concepts/deployments#deployment-modes). (e.g. ```defang compose up --provider=aws --mode=production```) ```yaml services: service-example: x-defang-autoscaling: true #enable autoscaling for this service build: context: . dockerfile: Dockerfile ports: - mode: ingress target: 8080 published: 8080 ``` Auto-scaling systems typically rely on: - **Metrics Collection**: Real-time monitoring of system metrics. 
- **Scaling Policies**: Rules that define when to scale up or down. For example: - If average CPU > 85% for 5 minutes → scale up by 2 instances. - **Cooldown Periods**: Delays between scaling events to prevent rapid, repeated changes (flapping). | Platform | Auto-Scaling Support | |----------------|:----------------------:| | Playground | ❌ | | AWS | ✅ | | DigitalOcean | ❌ | | GCP | ✅ | - **Elasticity**: Automatically adapts to changing workloads. - **Resilience**: Helps maintain performance during traffic surges or partial outages. - **Efficiency**: Reduces the need for manual intervention or over-provisioning. - Ensure services are **stateless** or use **externalized state** (e.g., databases, caches) for smooth scaling. ([12 Factor App](https://12factor.net/processes)) - Test services under load to identify scaling bottlenecks. See Tutorial page [Scaling Tutorial](/docs/tutorials/scaling-your-services)", + "path": "/docs/concepts/scaling" + }, + { + "id": 72, "about": "Defang Playground, Limitations, No Apex Domain Support, Max Resources", "text": "Defang aims to make it easier to deploy your services to the cloud. Specifically, Defang's goal is to make it easier to deploy your workloads to your *own* cloud accounts. We refer to this as bring-your-own-cloud (BYOC) which you can read about in more depth [here](./defang-byoc). We also provide Defang Playground, but it is only intended for non-production workloads so you can get a feel for how Defang works. Defang Playground is a free tier that allows you to deploy services to a Defang-hosted cloud account without needing to manage your own. It is intended for non-production workloads only. :::warning Please note that the Defang Playground is intended for testing/learning purposes only. The environment may be reset at any time. Do not run production workloads in Defang Playground. ::: There are a few limitations to Defang Playground. 
These limitations are in place to ensure that Defang Playground is only used for non-production workloads. If you are interested in running production workloads, you should use Defang BYOC. When you deploy a service to Defang Playground, it will be assigned a domain under the `defang.dev` domain. We do not support pointing your own apex domains to Defang Playground services, but you can use subdomains. * Projects: 1 * Services: 4 * CPUs: 2 * Memory: 1GiB * Replicas: 1", "path": "/docs/concepts/defang-playground" }, { - "id": 40, + "id": 73, "about": "Services, Service Name Resolution, Service Deployment, Service Status", "text": "Defang allows you deploy services defined as containers. You can define your services using a [Compose file](./compose.md) or a [Pulumi program](./pulumi.md). Services can be exposed to the internet or kept private, and can communicate between themselves using certain conventions for hostnames. Defang identifies services by using your [account username](/docs/concepts/accounts), [project name](/docs/concepts/projects), and service name. The port is included in the [domain](/docs/concepts/domains) for the service. :::tip Service names are defined in your Compose file or in your Pulumi program. ::: Domains for services in Playground follow the following pattern: ``` ---.prod1a.defang.dev ``` Domains for services in Defang BYOC follow the following pattern: ``` --...defang.app ``` Defang manages the deployment process for services. You can learn more about how services are deployed in the [Deployment page](./deployments.md). :::info We plan to add support for other types of services in the future, including serverless functions. ::: An overview of the possible statuses for a service in Defang. | Status | Details | |-|-| | BUILD_QUEUED | The service update has been received and is now in the queue for its image to be built. 
| | BUILD_PROVISIONING | The container orchestrator is provisioning the necessary resources for building your service's image. | | BUILD_PENDING | The necessary resources to build your service have been provisioned but the build has not yet been initiated. | | BUILD_ACTIVATING | The container orchestrator is pulling the build container's image and creating the build container. | | BUILD_RUNNING | The container which builds your service's image is successfully running. | | BUILD_STOPPING | The container orchestrator has sent a termination signal to the build container and is waiting for the build process to stop. | | BUILD_FAILED | The build container exited with a non-zero status code. | | UPDATE_QUEUED | The service update has been received and is now in the queue. | | SERVICE_DEPLOYMENT_PENDING | The necessary resources to run your service have been provisioned but the service has not yet been initiated. | | SERVICE_DEPLOYMENT_COMPLETED | Your service has been deployed and is healthy. | | SERVICE_DEPLOYMENT_FAILED | Your service could not be deployed. | :::tip You can run the `defang compose ps` command to view the status of your services. :::", "path": "/docs/concepts/services" }, { - "id": 41, + "id": 74, "about": "Defang BYOC, Configuration", "text": "Defang aims to make it easier to deploy your services to the cloud. Specifically, Defang's goal is to make it easier to deploy your workloads to your *own* cloud accounts. We refer to this as bring-your-own-cloud (BYOC). We also provide Defang Playground, but it is only intended for non-production workloads so you can get a feel for how Defang works. Defang provisions and configures the necessary native managed services from your cloud provider to get your services up and running. 
For example, on AWS, Defang will configure an [ALB](https://aws.amazon.com/elasticloadbalancing/application-load-balancer/), set up [ECR](https://aws.amazon.com/ecr/), configure [CloudWatch](https://aws.amazon.com/cloudwatch/?nc2=type_a), and run your service on [ECS](https://aws.amazon.com/ecs/?nc2=type_a) and more. The following lists the existing and planned support for cloud providers. :::info Defang does not install or run any Defang or third party services at runtime. Defang does run the Defang build service to build your container images, which terminates after every build. ::: The Defang CLI can be configured to deploy to a cloud provider either by using the `--provider` flag or by setting the `DEFANG_PROVIDER` environment variable. For example: ```bash", "path": "/docs/concepts/defang-byoc" }, { - "id": 42, + "id": 75, "about": "Default is auto, which deploys to the Defang Playground but warns if it detects cloud credentials", "text": "$ defang compose up --provider=aws", "path": "/docs/concepts/defang-byoc" }, { - "id": 43, + "id": 76, "about": "or, AWS, DigitalOcean, GCP, Azure", - "text": "$ export DEFANG_PROVIDER=digitalocean ``` Please read the [AWS Provider](../providers/aws/aws.md) documentation for more details about how the AWS provider works and how to get started. :::success AWS Free Tier & Credits You can use the AWS Free Tier to try out Defang. Learn more about it [here](https://aws.amazon.com/free/?all-free-tier.sort-by=item.additionalFields.SortRank&all-free-tier.sort-order=asc&awsf.Free%20Tier%20Types=*all&awsf.Free%20Tier%20Categories=*all). If you're an elligible startup, you can sign up for credits [here](https://aws.amazon.com/startups/sign-up?referrer_url_path=%2Fstartups). ::: :::info The Defang DigitalOcean Provider is available for Public Preview as of October 2024. 
::: Please read the [DigitalOcean Provider](../providers/digitalocean/digitalocean.md) documentation for more details about how the DigitalOcean provider works and how to get started. :::success DigitalOcean Credits You can get DigitalOcean credits to try out Defang. Learn more about it on their [pricing page](https://www.digitalocean.com/pricing). If you're an elligible startup, you can sign up for credits [here](https://www.digitalocean.com/hatch). ::: :::info The Defang GCP Provider is available for Public Preview as of December 2024. ::: Please check out the [GCP Provider](../providers/gcp/) page for more details. :::success GCP Free Tier & Credits You can use the GCP Free Tier to try out Defang. Learn more about it [here](https://cloud.google.com/free). If you're an elligible startup, you can sign up for credits [here](https://cloud.google.com/developers/startups). ::: :::info We will be working on Azure support in the future. If you are interested in Azure support, please vote on [this issue](https://github.com/DefangLabs/defang/issues/57). :::", + "text": "$ export DEFANG_PROVIDER=digitalocean ``` Please read the [AWS Provider](../providers/aws/aws.md) documentation for more details about how the AWS provider works and how to get started. :::tip[AWS Free Tier & Credits] You can use the AWS Free Tier to try out Defang. Learn more about it [here](https://aws.amazon.com/free/?all-free-tier.sort-by=item.additionalFields.SortRank&all-free-tier.sort-order=asc&awsf.Free%20Tier%20Types=*all&awsf.Free%20Tier%20Categories=*all). If you're an eligible startup, you can sign up for credits [here](https://aws.amazon.com/startups/sign-up?referrer_url_path=%2Fstartups). ::: :::info The Defang DigitalOcean Provider is available for Public Preview as of October 2024. ::: Please read the [DigitalOcean Provider](../providers/digitalocean/digitalocean.md) documentation for more details about how the DigitalOcean provider works and how to get started. 
:::tip[DigitalOcean Credits] You can get DigitalOcean credits to try out Defang. Learn more about it on their [pricing page](https://www.digitalocean.com/pricing). If you're an eligible startup, you can sign up for credits [here](https://www.digitalocean.com/hatch). ::: :::info The Defang GCP Provider is available for Public Preview as of December 2024. ::: Please check out the [GCP Provider](../providers/gcp/) page for more details. :::tip[GCP Free Tier & Credits] You can use the GCP Free Tier to try out Defang. Learn more about it [here](https://cloud.google.com/free). If you're an eligible startup, you can sign up for credits [here](https://cloud.google.com/developers/startups). ::: :::info We will be working on Azure support in the future. If you are interested in Azure support, please vote on [this issue](https://github.com/DefangLabs/defang/issues/57). :::", "path": "/docs/concepts/defang-byoc" }, { - "id": 44, + "id": 77, "about": "Observability, Tail, Architecture", "text": "You can easily monitor and debug your Defang services at build and runtime using the Defang CLI and Portal. When you deploy a service using the `defang compose up`, the CLI will automatically start tailing the build and runtime logs for your service. You can also view the logs for your service in the Portal, or by using the `defang tail` command. :::tip Defang has a tutorial on how you can [monitor your services' status and logs](/docs/tutorials/monitoring-your-services). ::: :::warning Keep in mind that the [Defang Portal](./portal.md) only displays services deployed to Defang Playground. ::: The `defang tail` command will tail the logs for all your services by default. You can also specify a service `--name` to tail the logs for a specific service. If you specify the `--etag` the CLI will only tail the logs for a specific build of a service. 
``` defang tail --name my-service defang tail --etag ua119053ehi2 ``` In [Defang BYOC](./defang-byoc.md), output is logged to the native logging tools within your cloud provider. The CLI then tails the output as needed.", "path": "/docs/concepts/observability" }, { - "id": 45, + "id": 78, + "about": "Cost Estimation, Generating an Estimate, Deploying your project", + "text": "Defang enables you to estimate the cost of deploying and running your project without needing to create an account with your cloud provider. :::info Currently, AWS is the only provider supported for cost estimation. Support for GCP and Digital Ocean is coming soon. ::: Navigate your shell to your application's working directory and run ``` defang estimate [--provider aws] [--mode affordable|balanced|high_availability] ``` Here is an example of the output you would see if you estimated the cost deploying the [django-postgres](https://github.com/DefangLabs/samples/tree/main/samples/django-postgres) sample using the `balanced` [deployment mode](/docs/concepts/deployment-modes). ``` defang estimate --provider=aws --mode=balanced * Packaging the project files for django at /Users/defang/samples/samples/django-postgres/app * Generating deployment preview * Preparing estimate Estimate for Deployment Mode: BALANCED This mode strikes a balance between cost and availability. Your application will be deployed with spot instances. Databases will be provisioned using resources optimized for production. Services in the \"internal\" network will be deployed to a private subnet with a NAT gateway for outbound internet access. 
Cost Quantity Service Description $16.43 730 Hours (shared) AWSELB USW2-LoadBalancerUsage $32.85 730 Hours (shared) AmazonEC2 USW2-NatGateway-Hours $25.00 100 %Utilized/mo db AmazonRDS USW2-InstanceUsage:db.r5.large $1.62 14600 GB-Hours django AmazonECS USW2-Fargate-EphemeralStorage-GB-Hours (20 GB * 730 hours) $1.62 365 GB-Hours django AmazonECS USW2-Fargate-GB-Hours (0.50 GB * 730 hours) -$1.14 365 GB-Hours django AmazonECS USW2-Fargate-GB-Hours-SpotDiscount (Estimated @ 70%) $7.39 182.50 vCPU-Hours django AmazonECS USW2-Fargate-vCPU-Hours:perCPU (0.25 vCPU * 730 hours) -$5.17 182.50 vCPU-Hours django AmazonECS USW2-Fargate-vCPU-Hours:perCPU-SpotDiscount (Estimated @ 70%) Estimated Monthly Cost: $78.60 (+ usage) Estimate does not include taxes or Discount Programs. To estimate other modes, use defang estimate --mode=affordable|balanced|high_availability For help with warnings, check our FAQ at https://s.defang.io/warnings ``` This estimate will include a line item for the most significant monthly costs associated with the services described in your project's compose file. If you modify the [`deploy.resources.reservations`](/docs/concepts/compose#deploy) section of your compose file, to increase or decrease replicas, CPU or memory allocations, those changes will be reflected in this estimate. :::info Some charges will not be included in this estimate, including charges related to usage like data transfer and storage costs. ::: Now that you have estimated the costs associated with your project. You are ready to deploy to your target cloud. ``` defang compose up [--provider aws|gcp|digitalocean] [--mode affordable|balanced|high_availability] ```", + "path": "/docs/concepts/estimation" + }, + { + "id": 79, "about": "Security, Roles & Permissions, Networking, SSL, Secrets", "text": "Defang's philosophy is to operate on a principle of \"least-privilege\". This means that we only give your services the permissions they need to operate. 
Because Defang creates roles, you need to have the appropriate permissions to create roles in your cloud provider account, typically the `AdministratorAccess` policy in AWS. :::tip Best practice is to run the Defang CLI in a CI/CD environment and to restrict deployment permissions at that level. ::: Defang creates roles for your services to use, and attaches policies to those roles. This means that your services only have the permissions they need to operate, and nothing more. Defang configures Security Groups, deploys applictions to a private subnet and uses an Application Load Balancer to route traffic to your services from the public internet only when required. Defang automates the process of obtaining and renewing SSL certificates for your services using AWS Certificate Manager. Secrets are stored in AWS Systems Manager Parameter Store, and are encrypted using a key stored in AWS Key Management Service (KMS).", "path": "/docs/concepts/security" }, { - "id": 46, + "id": 80, "about": "Managed Redis, Current Support, How to use Managed Redis, Final Snapshots", - "text": "Redis is an in-memory data structure store widely used for caching, real-time analytics, and session management due to its high performance, low latency, and support for various data types. Defang can help you provision a managed Redis instance. | Provider | Managed Redis | | --- | --- | | [Playground](/docs/providers/playground#managed-services) | ❌ | | [AWS](/docs/providers/aws#managed-storage) | ✅ | | [DigitalOcean](/docs/providers/digitalocean#future-improvements) | ❌ | | [GCP](/docs/providers/gcp#future-improvements) | ✅ | To use managed Redis, in your `compose.yaml` file, use the `x-defang-redis` extension to define your Redis service. Adding the extension will tell Defang to provision a managed instance, rather than running Redis as a service. Defang will use the image tag to determine the version to provision from your cloud provider. 
Here's an example: ```yaml cache: image: redis:6.2 x-defang-redis: true restart: unless-stopped ports: - mode: host target: 6379 ``` When a project is deployed with the `production` [deployment mode](/docs/concepts/deployment-modes), any managed Redis instances are automatically configured to create a snapshot of the datastore before deletion. The snapshot will be named with the following format: ` --redis--final-snapshot ` The AWS Console can be used to restore a snapshot into a new instance of Redis.", + "text": "Redis is an in-memory data structure store widely used for caching, real-time analytics, and session management due to its high performance, low latency, and support for various data types. Defang can help you provision a managed Redis instance. | Provider | Managed Redis | | --- | --- | | [Playground](/docs/providers/playground#managed-services) | ⚠️ Unmanaged | | [AWS](/docs/providers/aws#managed-storage) | ✅ Elasticache | | [DigitalOcean](/docs/providers/digitalocean#future-improvements) | ⚠️ Unmanaged | | [GCP](/docs/providers/gcp#managed-services) | ✅ Memorystore | To use managed Redis, in your `compose.yaml` file, use the `x-defang-redis` extension to define your Redis service. Adding the extension will tell Defang to provision a managed instance, rather than running Redis as a service. Defang will use the image tag to determine the version to provision from your cloud provider. Here's an example: ```yaml cache: image: redis:6.2 x-defang-redis: true restart: unless-stopped ports: - mode: host target: 6379 ``` When a project is deployed with the `production` [deployment mode](/docs/concepts/deployment-modes), any managed Redis instances are automatically configured to create a snapshot of the datastore before deletion. The snapshot will be named with the following format: ` --redis--final-snapshot ` The AWS Console can be used to restore a snapshot into a new instance of Redis. 
This feature is not yet supported on GCP.", "path": "/docs/concepts/managed-storage/managed-redis" }, { - "id": 47, + "id": 81, "about": "Managed Storage", - "text": "Defang helps you provision the infrastructure you need to run your services. That infrastructure is designed to scale in and out without persistent storage, so you can build highly scalable services. But Defang can also help you provision managed services to store and persist your data, like [caches](./managed-redis.md), [databases](./managed-postgres.md), and [object storage](./managed-object-storage.md).", + "text": "Defang helps you provision the infrastructure you need to run your services. That infrastructure is designed to scale in and out without persistent storage, so you can build highly scalable services. But Defang can also help you provision managed services to store and persist your data, like [caches](./managed-redis.md), [databases](./managed-postgres.mdx), and [object storage](./managed-object-storage.md).", "path": "/docs/concepts/managed-storage/managed-storage" }, { - "id": 48, + "id": 82, + "about": "Managed Postgres, Current Support, How to use Managed Postgres, Required Configuration, Optional Configuration, Connecting to Managed Postgres, Example, Final Snapshots, Major Version Updating of Engine", + "text": "Postgres, or PostgreSQL, is a powerful open-source relational database system known for its robustness, extensibility, and compliance with SQL standards, making it ideal for complex applications requiring reliable data integrity and advanced querying. Defang can help you provision a managed Postgres instance. 
| Provider | Managed Postgres | | --- | --- | | [Playground](/docs/providers/playground#managed-services) | ⚠️ Unmanaged | | [AWS](/docs/providers/aws#managed-storage) | ✅ RDS Postgres | | [DigitalOcean](/docs/providers/digitalocean#future-improvements) | ⚠️ Unmanaged | | [GCP](/docs/providers/gcp#managed-storage) | ✅ Cloud SQL Postgres | To use managed Postgres, in your `compose.yaml` file, use the `x-defang-postgres` extension to define your Postgres service. Adding the extension will tell Defang to provision a managed instance, rather than running Postgres as a service. When using managed Postgres, you **must** set a password for the database using `defang config set POSTGRES_PASSWORD`. If you do not provide the password, the deployment will fail. - `POSTGRES_PASSWORD`: You can assign the password in the service's environment variables. To learn more about how this works, read about [configuration](../configuration.md). You can also set the following optional environment variables to configure the managed Postgres instance: - `POSTGRES_USER`: The user for the managed Postgres instance. The default is `postgres`. - `POSTGRES_DB`: The database name for the managed Postgres instance. The default is `postgres`. You can connect to the managed Postgres instance using the name of your service as the hostname, `POSTGRES_USER`, `POSTGRES_DB`, and `POSTGRES_PASSWORD` environment variables. :::info For a smoother experience with Defang, we recommend using Postgres 14 for your container images. This version provides easier access and improved usability. ::: ```yaml app: # [...] 
environment: POSTGRES_HOST: database POSTGRES_USER: postgres POSTGRES_DB: postgres // highlight-start # Note: by leaving the value empty, Defang will use the # value set using `defang config set POSTGRES_PASSWORD` POSTGRES_PASSWORD: // highlight-end # Note: you can create a connection string by using interpolation, # reference config variables by using ${} CONNECTURL: postgresql://postgres:${POSTGRES_PASSWORD}@database:5432/postgres?sslmode=require database: image: postgres:14 x-defang-postgres: true ports: - mode: host target: 5432 environment: // highlight-start # Note: by leaving the value empty, Defang will use the # value set using `defang config set POSTGRES_PASSWORD` POSTGRES_PASSWORD: // highlight-end ``` When a project is deployed to a [production environment](/docs/concepts/deployment-modes), any managed Postgres instances are automatically configured to create a snapshot of the database before deletion. The snapshot will be named with the following format: ` --postgres--final-snapshot ` The AWS Console can be used to restore a snapshot into a new instance of Postgres. This feature is not yet supported in GCP. {/* To update the database engine you can update the image to a later version in your Compose file and apply it via ```defang compose up --provider=aws```. In the example below, we change from Postgres 15 to 16. Please note the upgrading will occur immediately and may result in the database being unavailable for some time. ``` database: image: postgres:15 ``` to ``` database: image: postgres:16 ``` */}", + "path": "/docs/concepts/managed-storage/managed-postgres" + }, + { + "id": 83, "about": "Managed Object Storage, Current Support", - "text": ":::info Not Yet Supported As of September 2024, Defang does not yet support managed Object Storage, but it is on our roadmap. If you are interested in Object Storage support, please vote on [this issue](https://github.com/DefangLabs/defang/issues/688). 
::: Managed Object Storage, like AWS S3, is a service that allows you to store and retrieve large amounts of data. Object storage is ideal for storing unstructured data like images, videos, and backups. | Provider | Managed Object Storage | | --- | --- | | [Playground](/docs/providers/playground#managed-services) | ❌ | | [AWS](/docs/providers/aws#managed-storage) | ❌ | | [DigitalOcean](/docs/providers/digitalocean#future-improvements) | ❌ | | [GCP](/docs/providers/gcp#future-improvements) | ❌ |", + "text": ":::info[Not Yet Supported] As of September 2024, Defang does not yet support managed Object Storage, but it is on our roadmap. If you are interested in Object Storage support, please vote on [this issue](https://github.com/DefangLabs/defang/issues/688). ::: Managed Object Storage, like AWS S3, is a service that allows you to store and retrieve large amounts of data. Object storage is ideal for storing unstructured data like images, videos, and backups. | Provider | Managed Object Storage | | --- | --- | | [Playground](/docs/providers/playground#managed-services) | ❌ | | [AWS](/docs/providers/aws#managed-storage) | ❌ | | [DigitalOcean](/docs/providers/digitalocean#future-improvements) | ❌ | | [GCP](/docs/providers/gcp#future-improvements) | ❌ |", "path": "/docs/concepts/managed-storage/managed-object-storage" }, { - "id": 49, - "about": "Managed Postgres, Current Support, How to use Managed Postgres, Required Configuration, Optional Configuration, Connecting to Managed Postgres, Example, Final Snapshots, Major Version Updating of Engine", - "text": "Postgres, or PostgreSQL, is a powerful open-source relational database system known for its robustness, extensibility, and compliance with SQL standards, making it ideal for complex applications requiring reliable data integrity and advanced querying. Defang can help you provision a managed Postgres instance. 
| Provider | Managed Postgres | | --- | --- | | [Playground](/docs/providers/playground#managed-services) | ❌ | | [AWS](/docs/providers/aws#managed-storage) | ✅ | | [DigitalOcean](/docs/providers/digitalocean#future-improvements) | ❌ | | [GCP](/docs/providers/gcp#future-improvements) | ✅ | To use managed Postgres, in your `compose.yaml` file, use the `x-defang-postgres` extension to define your Postgres service. Adding the extension will tell Defang to provision a managed instance, rather than running Postgres as a service. When using managed Postgres, you **must** set a password for the database using `defang config set POSTGRES_PASSWORD`. If you do not provide the password, the deployment will fail. - `POSTGRES_PASSWORD`: You can can assign the password in the service's environment variables. To learn more about how this works, read about [configuration](../configuration.md). You can also set the following optional environment variables to configure the managed Postgres instance: - `POSTGRES_USER`: The user for the managed Postgres instance. The default is `postgres`. - `POSTGRES_DB`: The database name for the managed Postgres instance. The default is `postgres`. You can connect to the managed Postgres instance using the name of your service as the hostname, `POSTGRES_USER`, `POSTGRES_DB`, and `POSTGRES_PASSWORD` environment variables. :::info For a smoother experience with Defang, we recommend using Postgres 14 for your container images. This version provides easier access and improved usability. ::: ```yaml app: # [...] 
environment: POSTGRES_HOST: database POSTGRES_USER: postgres POSTGRES_DB: postgres // highlight-start # Note: by leaving the value empty, Defang will use the # value set using `defang config set POSTGRES_PASSWORD` POSTGRES_PASSWORD: // highlight-end # Note: you can create a connection string by using interpolation, # reference config variables by using ${} CONNECTURL: postgresql://postgres:${POSTGRES_PASSWORD}@database:5432/postgres?sslmode=require database: image: postgres:14 x-defang-postgres: true ports: - mode: host target: 5432 environment: // highlight-start # Note: by leaving the value empty, Defang will use the # value set using `defang config set POSTGRES_PASSWORD` POSTGRES_PASSWORD: // highlight-end ``` When a project is deployed to a [production environment](/docs/concepts/deployment-modes), any managed Postgres instances are automatically configured to create a snapshot of the database before deletion. The snapshot will be named with the following format: ` --postgres--final-snapshot ` The AWS Console can be used to restore a snapshot into a new instance of Postgres. ", - "path": "/docs/concepts/managed-storage/managed-postgres" + "id": 84, + "about": "Managed MongoDB, Current Support, How to use Managed MongoDB, Required Configuration, Example", + "text": ":::info This feature was introduced in Defang v1.2.0 released on June 3, 2025. [Upgrade to the latest version](/docs/cli/defang_upgrade) ::: Managed MongoDB is a service that allows you to store and retrieve large amounts of data in a document-oriented format. MongoDB is ideal for storing unstructured data like JSON documents, making it a popular choice for modern applications. 
| Provider | Managed MongoDB | | --- | --- | | [Playground](/docs/providers/playground#managed-services) | ⚠️ Unmanaged | | [AWS](/docs/providers/aws#managed-storage) | ✅ DocumentDB | | [DigitalOcean](/docs/providers/digitalocean#future-improvements) | ⚠️ Unmanaged | | [GCP](/docs/providers/gcp#future-improvements) | ⚠️ Unmanaged | To use managed MongoDB, in your `compose.yaml` file, use the `x-defang-mongodb` extension to define your MongoDB service. Adding the extension will tell Defang to provision a managed instance, rather than running MongoDB as a container. When using managed MongoDB, you **must** set a username and password for the database. By default, these are read from the `MONGO_INITDB_ROOT_USERNAME` and `MONGO_INITDB_ROOT_PASSWORD` config variables, conform [the official MongoDB container image](https://hub.docker.com/_/mongo). These can be set using the `defang config set MONGO_INITDB_ROOT_USERNAME` and `defang config set MONGO_INITDB_ROOT_PASSWORD` commands. If you do not provide these values, the deployment will fail. ```yaml services: app: # [...] environment: MONGO_URI: mongodb://$MONGO_INITDB_ROOT_USERNAME:$MONGO_INITDB_ROOT_PASSWORD@db:27017/ depends_on: - db db: image: mongo:5 x-defang-mongodb: true ``` :::warning[Version] AWS DocumentDB is compatible with MongoDB 3.6, 4.0, and 5.0. The `mongo:5` image is compatible with DocumentDB 5.0, so we recommend using this version for your MongoDB service. Deployment will fail for versions higher than 5. :::", + "path": "/docs/concepts/managed-storage/managed-mongodb" }, { - "id": 50, + "id": 85, + "about": "OpenAI Access Gateway, Docker Model Provider Services, Model Mapping, Current Support", + "text": "Defang makes it easy to deploy on your favourite cloud's managed LLM service with our [OpenAI Access Gateway](https://github.com/DefangLabs/openai-access-gateway). This service sits between your application and the cloud service and acts as a compatibility layer. 
It handles incoming OpenAI requests, translates those requests to the appropriate cloud-native API, handles the native response, and re-constructs an OpenAI-compatible response. See [our tutorial](/docs/tutorials/deploy-openai-apps) which describes how to configure the OpenAI Access Gateway for your application. As of Docker Compose v2.35 and Docker Desktop v4.41, Compose introduces a new service type called `provider` that allows you to declare platform capabilities required by your application. For AI models, you use the `model` type to declare model dependencies. This will expose an OpenAI compatible API for your service. Check the [Docker Model Runner documentation](https://docs.docker.com/compose/how-tos/model-runner/) for more details. ```yaml services: chat: build: . depends_on: - ai_runner ai_runner: provider: type: model options: model: ai/mistral x-defang-llm: true ``` Under the hood, when you use the `model` provider, Defang will deploy the **OpenAI Access Gateway** in a private network. This allows you to use the same code for both local development and cloud deployment. The `x-defang-llm` extension is used to configure the appropriate roles and permissions for your service. See the [Managed Language Models](/docs/concepts/managed-llms/managed-language-models/) page for more details. Defang supports model mapping through the [openai-access-gateway](https://github.com/DefangLabs/openai-access-gateway) on AWS and GCP. This takes a model with a [Docker naming convention](https://hub.docker.com/catalogs/models) (e.g. `ai/llama3.3`) and maps it to the closest matching model name on the target platform. If no such match can be found, it can fallback onto a known existing model (e.g. `ai/mistral`). This can be configured through the following environment variables: * `USE_MODEL_MAPPING` (default to true) - configures whether or not model mapping should be enabled. 
* `FALLBACK_MODEL` (no default) - configure a model which will be used if model mapping fails to find a target model. | Provider | Managed Language Models | | --- | --- | | [Playground](/docs/providers/playground#managed-services) | ✅ | | [AWS Bedrock](/docs/providers/aws#managed-llms) | ✅ | | [DigitalOcean GenAI](/docs/providers/digitalocean#future-improvements) | ❌ | | [GCP Vertex AI](/docs/providers/gcp#managed-llms) | ✅ |", + "path": "/docs/concepts/managed-llms/openai-access-gateway" + }, + { + "id": 86, + "about": "Managed Language Models, Current Support, Usage, Example, Deploying OpenAI-compatible apps, Managed LLM on Playground", + "text": "Each cloud provider offers their own managed Large Language Model services. AWS offers Bedrock, GCP offers Vertex AI, and Digital Ocean offers their GenAI platform. Defang makes it easy to leverage these services in your projects. | Provider | Managed Language Models | | --- | --- | | [Playground](/docs/providers/playground#managed-large-language-models) | ✅ | | [AWS Bedrock](/docs/providers/aws#managed-large-language-models) | ✅ | | [DigitalOcean GenAI](/docs/providers/digitalocean#future-improvements) | ❌ | | [GCP Vertex AI](/docs/providers/gcp#managed-large-language-models) | ✅ | In order to leverage cloud-native managed language models from your Defang services, all you need to do is add the `x-defang-llm` extension to the service config and Defang will configure the approprate roles and permissions for you. :::info Ensure you have enabled model access for the model you intend to use: * [Configure AWS Bedrock model access](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access-modify.html) * [Configure GCP Vertex AI model access](https://cloud.google.com/vertex-ai/generative-ai/docs/control-model-access) ::: Assume you have a web service like the following, which uses the cloud native SDK, for example: ```diff services: app: build: context: . 
+ x-defang-llm: true ``` If you already have an OpenAI-compatible application, Defang makes it easy to deploy on your favourite cloud's managed LLM service. See our [OpenAI Access Gateway](/docs/concepts/managed-llms/openai-access-gateway). :::tip Defang has a [*Managed LLM sample*](https://github.com/DefangLabs/samples/tree/main/samples/managed-llm) that uses the OpenAI Access Gateway, and a [*Managed LLM with Docker Model Provider sample*](https://github.com/DefangLabs/samples/tree/main/samples/managed-llm-provider) that uses a Docker Model Provider. ::: If you are using the Managed LLM feature on [Defang Playground](/docs/concepts/defang-playground), please note that your `MODEL` (model ID) will be limited to a default model chosen by Defang. To access a full range of models, consider using [Defang BYOC](/docs/concepts/defang-byoc).", + "path": "/docs/concepts/managed-llms/managed-language-models" + }, + { + "id": 87, "about": "Deploy Using Pulumi, Pre-requisites, Step 1 - Authenticate With Defang, Step 2 - Configure the Pulumi Backend, Step 3 - Initialize the Pulumi Project, Step 4 - Write Your Pulumi Code, Step 5 - Deploy to Defang, Step 6 - Monitor the Deployment, Logging Into Minio, Clean Up", "text": ":::warning At the time of writing, the Defang Pulumi Provider only works with [Defang Playground](../concepts/defang-playground.md). We are working on [BYOC](../concepts/defang-byoc.md) support. ::: This tutorial will show you how to deploy Minio with Pulumi using the Defang Provider. * [A Defang Account](/docs/concepts/authentication) * [Pulumi CLI](https://www.pulumi.com/docs/install) * [The Defang CLI](/docs/getting-started#install-the-defang-cli) * [The Defang Pulumi Provider](/docs/concepts/pulumi#install-the-defang-pulumi-provider) Make sure you are logged into the [Defang CLI](/docs/concepts/authentication.md). Don't worry about the Pulumi CLI for now. Navigate to your project directly. 
(If you don't have a project yet, try one of our [samples](https://defang.io/samples)) Pulumi uses the `pulumi login` command to select a backend. In this tutorial, we will use the Local Filesystem backend for simplicity. When you are ready to deploy to production, you will probably want to look into other [Pulumi backend options](https://www.pulumi.com/docs/iac/concepts/state-and-backends). Run the following command to \"login\" to the filesystem backend in the local directory. ```bash pulumi login file://./ ``` This will make the Pulumi CLI store the state of your infrastructure in the current directory. Run the following command to set your encryption passphrase for this terminal session: ```bash export PULUMI_CONFIG_PASSPHRASE=\"super-secure-passphrase\" ``` Now let's initialize the Pulumi project: ```bash pulumi new typescript -y --force ``` This will create a new Pulumi project in the current directory and will create a Pulumi stack called `dev` by default. We use the `--force` flag because the directory isn't empty (we created a folder when we logged in with the Pulumi CLI). Run the following command to add to the `.gitignore` file: ```bash echo \".pulumi\" >> .gitignore ``` Create an `index.ts` file to contain your Pulumi code. This code will describe our services, our service's dependencies, and our service's configuration. 
```typescript import * as defang from '@defang-io/pulumi-defang/lib'; export const service = new defang.DefangService('minio', { name: 'minio', image: 'quay.io/minio/minio', // starts the server with the console address set to :9001 command: ['server', '--console-address', ':9001', '/data'], // Set the root username environment: { MINIO_ROOT_USER: 'minio', }, secrets: [ // Set the root password as a secret which will be encrypted at rest { source: 'MINIO_ROOT_PASSWORD', value: 'minio123', }, ], // Run a healthcheck every 30 seconds healthcheck: { test: ['CMD', 'curl', 'http://localhost:9000/minio/health/live'], interval: 30, timeout: 5, retries: 3, }, // Expose the server on port 9000 and the console on port 9001 ports: [ { target: 9000, protocol: 'http', mode: 'ingress', }, { target: 9001, protocol: 'http', mode: 'ingress', }, ], }); ``` Now we're ready to deploy to Defang with Pulumi! Run the following command to deploy your service: ```bash pulumi up --stack=dev ``` You can monitor the deployment by running the following command: ```bash defang tail --name minio ``` The [Defang Playground](../concepts/defang-playground.md) will give you a domain, which you can obtain by running the following command: ```bash defang ls | grep 'minio.*9001' ``` If you navigate to the domain in your browser, you will be prompted to log in. Use the username `minio` and the password `minio123`. 
To clean up the deployment, run the following command: ```bash pulumi destroy --stack=dev ``` --- See the [Pulumi concept docs](/docs/concepts/pulumi) for more information about the Defang Pulumi Provider.", "path": "/docs/tutorials/deploy-using-pulumi" }, { - "id": 51, + "id": 88, "about": "Deploy to DigitalOcean, Pre-requisites, Step 1 - Navigate to your project directory, Step 2 - Authenticate Defang to use your DigitalOcean Account, Step 3 - Deploy, Step 4 - Inspect your deployment", "text": "This tutorial will show you how to deploy your services to your own DigitalOcean account using Defang. * [A Dockerfile in your project](https://docs.docker.com/get-started/docker-concepts/building-images/writing-a-dockerfile/) * [A `compose.yaml` file in your project](https://docs.docker.com/compose/gettingstarted/) * [A Defang Account](/docs/concepts/authentication) * [The Defang CLI](/docs/getting-started#install-the-defang-cli) * [DigitalOcean Account Credentials](/docs/providers/digitalocean#getting-started) * [DigitalOcean Spaces Access Keys](/docs/providers/digitalocean#getting-started) Head to the folder where your project is located. ```bash $ cd path/to/your/project ``` Defang will look for your DigitalOcean credentials in your shell environment and expect to find the following credentials: * the `DIGITALOCEAN_TOKEN` environment variable * and, the `SPACES_ACCESS_KEY_ID` and `SPACES_SECRET_ACCESS_KEY` environment variables Invoke the `defang compose up` CLI command with the `--provider=do` flag or set the `DEFANG_PROVIDER=do` environment variable. For example: ```bash $ defang compose up --provider=do ``` Defang will provision resources in your DigitalOcean account and deploy your services. You can inspect the resources created in your DigitalOcean Dashboard. 
*** For a deeper discussion of the Defang DigitalOcean Architecture, see our [DigitalOcean Provider docs](/docs/providers/digitalocean).", "path": "/docs/tutorials/deploy-to-digitalocean" }, { - "id": 52, + "id": 89, "about": "---", "text": "# Using 1-Click Deploy\n\nThis tutorial will show you how to use Defang 1-Click Deploy to deploy a sample to the Defang Playground. \n\nThe 1-Click Deploy button is the easiest way for new users to deploy a sample project to the [Defang Playground](/docs/concepts/defang-playground). No CLI installation is required. \n\n:::info\nTo access the full range of features provided by Defang, we recommend using the [Defang CLI](/docs/getting-started).\n:::\n\n## Step 1 - Choose a Sample\nHead to our [list of samples](https://defang.io/#samples) and click a sample you want to deploy. Then, click on the button that says \"1-Click Deploy\".\n\n\"one-click-deploy-button\"\n
\n\n:::info\nAlternatively, you can find the \"1-Click Deploy\" button located in the `README.md` file of each sample's GitHub repository. \n\n\"deploy-with-defang-button\"\n
\n:::\n\n## Step 2 - Login\n\nFor 1-click deployments to work, Defang must have your permission, which you can grant by logging in. If you are already logged in, you will be automatically taken to the next step.\n\n![login-screen](/img/use-one-click-tutorial/login-screen.png)\n\n\n## Step 3 - Create Your Repo\n\nOnce logged in, you'll be redirected to GitHub. Click the \"Create repository button\" to create a new repository with the sample project. \n\n\"create-repository\"\n
\n\n\n## Step 4 - Wait for Deployment to Complete\n\nA Github Action workflow will automatically start running to install Defang and deploy the sample to the Defang Playground. You can see this by going into the \"Actions\" tab in your GitHub repository. \n\nYou can view the status of your deployment in the [Defang Portal](https://portal.defang.dev/), or by downloading the [Defang CLI](/docs/getting-started). You can also see deployment progress in the \"Actions\" tab of your GitHub repository:\n\n\"github-actions-tab\"\n\n
\n
\n\n:::tip\nIf you decide to make a commit later to a repository created from 1-Click Deploy, then the project will automatically get deployed again to Defang Playground.\n:::\n\nWhen it is completed, you can view your deployed app using the deployment link generated by Defang, which should appear similar to the format below:\n```\nhttps://---.defang.dev\n```\n\n### Configuration in 1-Click Deploy\n\nIf the sample you chose requires setting configuration, such as API keys, you can set sensitive config values as secrets in your GitHub repository and the GitHub Action can automatically handle those values for you. [Learn how to manage config values with the Defang Github Action](https://github.com/DefangLabs/defang-github-action?tab=readme-ov-file#managing-config-values).", "path": "/docs/tutorials/using-one-click-deploy" }, { - "id": 53, + "id": 90, "about": "Deploy a GPU-Powered Application to AWS, Prerequisites, AWS Account with GPU Access, HuggingFace Token, Step 1 - Clone the sample project, Step 2 - Check your [Defang BYOC](../concepts/defang-byoc.md) settings, Step 3 - Prepare your Environment, Step 4 - Explore the Compose File, The Mistral Service, The UI Service, Step 5 - Deploy to Your Own AWS Account with Defang", - "text": "This tutorial will show you how to create and deploy a GPU-powered application on AWS using Defang. We will walk you through the whole deployment process based on the [Mistral & vLLM](https://github.com/DefangLabs/samples/tree/main/samples/vllm) sample. :::info Note that GPU deployments are not permitted on the Defang Playground. You must [upgrade to a paid account](https://defang.io/pricing/) and [deploy to your own cloud account](https://docs.defang.io/docs/tutorials/deploy-to-your-cloud). 
::: * [A Defang Account](/docs/concepts/authentication) * [The Defang CLI](/docs/getting-started#install-the-defang-cli) * [AWS Account Credentials](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-authentication.html) For any of this to work, you'll need to have access to GPU instances in your AWS account. To do that you'll need to go to the \"[Service Quotas](https://console.aws.amazon.com/servicequotas/home)\" console in your AWS account. From there you can request access to spot GPU instances. You'll need to request 8 or more because the value is per vCPU and the smallest GPU instance has 8 vCPUs. The instance types you're requesting are \"All G and VT spot instances\". :::warning Timing This process can take a few days for AWS to approve. ::: ![Service Quotas console screenshot](/img/gpu-tutorial/quotas.png) This sample requires a HuggingFace token to download the model. You can get a token by signing up at [HuggingFace](https://huggingface.co/join) and then going to [your settings](https://huggingface.co/settings/tokens) to get your token. You'll need to clone the [Mistral & vLLM](https://github.com/DefangLabs/samples/tree/main/samples/vllm) sample to go through this tutorial. * Make sure you [install the latest version of the Defang CLI](../getting-started#install-the-defang-cli.md) * Then, make sure you have properly [authenticated your AWS account](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). The Defang CLI makes use of AWS environment variables like `AWS_PROFILE`, `AWS_REGION`, `AWS_ACCESS_KEY_ID`, and `AWS_SECRET_ACCESS_KEY`, so make sure the correct values are set for those. :::tip If you have the AWS CLI installed (which is not required in order to use the Defang CLI), you can verify that you've authenticated to AWS by running `aws sts get-caller-identity` and see your account ID. 
::: * Log into your Defang account ```bash defang login ``` * Set the HuggingFace token using the `defang config` command ```bash defang config set --name HF_TOKEN ``` [Configuration](../concepts/configuration.md) stores your sensitive information such as API keys, passwords, and other credentials for you. The `compose.yml` file is where you define your services and their configurations. In there you'll see the configuration we're using to deploy the Mistral model. We've highlighted some of the key aspects. ```yaml services: mistral: // highlight-next-line image: ghcr.io/mistralai/mistral-src/vllm:latest ports: - mode: host target: 8000 // highlight-next-line command: [\"--host\",\"0.0.0.0\",\"--model\",\"TheBloke/Mistral-7B-Instruct-v0.2-AWQ\",\"--quantization\",\"awq\",\"--dtype\",\"auto\",\"--tensor-parallel-size\",\"1\",\"--gpu-memory-utilization\",\".95\",\"--max-model-len\",\"8000\"] deploy: resources: reservations: cpus: '2.0' memory: 8192M devices: // highlight-next-line - capabilities: [\"gpu\"] healthcheck: test: [\"CMD\",\"curl\",\"http://localhost:8000/v1/models\"] interval: 5m timeout: 30s retries: 10 // highlight-start environment: - HF_TOKEN // highlight-end ``` **Let's break it down.** We start with the latest vLLM Docker image provided by [Mistral AI](https://docs.mistral.ai/self-deployment/vllm/). ```yaml mistral: image: ghcr.io/mistralai/mistral-src/vllm:latest ``` We specify that we require a GPU to run our application. ```yaml deploy: resources: reservations: cpus: '2.0' memory: 8192M devices: - capabilities: [\"gpu\"] ``` The Mistral model will be downloaded from HuggingFace. We need to have a HuggingFace Token to enable the installation, so we specify that we need to get the `HF_TOKEN` configuration value from Defang. Specifying the `HF_TOKEN` in the `environment` section of the service in the `compose.yml` file tells Defang to fetch the value from the encrypted configuration store. 
```yaml environment: - HF_TOKEN ``` In this sample we also provide a simple UI to interact with the endpoint created by vLLM. The UI service is a Next.js application that runs on port 3000. :::tip Networking You can see here how Defang's [networking](../concepts//networking.mdx) works. The `mistral` service is available at `http://mistral:8000`, exactly as it would be in a local `docker-compose` environment. ::: ```yaml ui: restart: unless-stopped build: context: ui dockerfile: Dockerfile ports: - mode: ingress target: 3000 deploy: resources: reservations: memory: 256M healthcheck: test: [\"CMD\",\"wget\",\"--spider\",\"http://localhost:3000\"] interval: 10s timeout: 2s retries: 10 environment: // highlight-next-line - OPENAI_BASE_URL=http://mistral:8000/v1/ ``` Run the following command to deploy your service: ```bash defang compose up --provider=aws ```", + "text": "This tutorial will show you how to create and deploy a GPU-powered application on AWS using Defang. We will walk you through the whole deployment process based on the [Mistral & vLLM](https://github.com/DefangLabs/samples/tree/main/samples/vllm) sample. :::info Note that GPU deployments are not permitted on the Defang Playground. You must [upgrade to a paid account](https://defang.io/pricing/) and [deploy to your own cloud account](/docs/tutorials/deploy-to-your-cloud). ::: - [A Defang Account](/docs/concepts/authentication) - [The Defang CLI](/docs/getting-started#install-the-defang-cli) - [AWS Account Credentials](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-authentication.html) For any of this to work, you'll need to have access to GPU instances in your AWS account. To do that you'll need to go to the \"[Service Quotas](https://console.aws.amazon.com/servicequotas/home)\" console in your AWS account. From there you can request access to spot GPU instances. You'll need to request 8 or more because the value is per vCPU and the smallest GPU instance has 8 vCPUs. 
The instance types you're requesting are \"All G and VT spot instances\". :::warning[Timing] This process can take a few days for AWS to approve. ::: ![Service Quotas console screenshot](/img/gpu-tutorial/quotas.png) This sample requires a HuggingFace token to download the model. You can get a token by signing up at [HuggingFace](https://huggingface.co/join) and then going to [your settings](https://huggingface.co/settings/tokens) to get your token. You'll need to clone the [Mistral & vLLM](https://github.com/DefangLabs/samples/tree/main/samples/vllm) sample to go through this tutorial. - Make sure you [install the latest version of the Defang CLI](../getting-started#install-the-defang-cli.md) - Then, make sure you have properly [authenticated your AWS account](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). The Defang CLI makes use of AWS environment variables like `AWS_PROFILE`, `AWS_REGION`, `AWS_ACCESS_KEY_ID`, and `AWS_SECRET_ACCESS_KEY`, so make sure the correct values are set for those. :::tip If you have the AWS CLI installed (which is not required in order to use the Defang CLI), you can verify that you've authenticated to AWS by running `aws sts get-caller-identity` and see your account ID. ::: - Log into your Defang account ```bash defang login ``` - Set the HuggingFace token using the `defang config` command ```bash defang config set --name HF_TOKEN ``` [Configuration](../concepts/configuration.md) stores your sensitive information such as API keys, passwords, and other credentials for you. The `compose.yml` file is where you define your services and their configurations. In there you'll see the configuration we're using to deploy the Mistral model. We've highlighted some of the key aspects. 
```yaml services: mistral: // highlight-next-line image: ghcr.io/mistralai/mistral-src/vllm:latest ports: - mode: host target: 8000 // highlight-next-line command: [\"--host\",\"0.0.0.0\",\"--model\",\"TheBloke/Mistral-7B-Instruct-v0.2-AWQ\",\"--quantization\",\"awq\",\"--dtype\",\"auto\",\"--tensor-parallel-size\",\"1\",\"--gpu-memory-utilization\",\".95\",\"--max-model-len\",\"8000\"] deploy: resources: reservations: cpus: '2.0' memory: 8192M devices: // highlight-next-line - capabilities: [\"gpu\"] healthcheck: test: [\"CMD\",\"curl\",\"http://localhost:8000/v1/models\"] interval: 5m timeout: 30s retries: 10 // highlight-start environment: - HF_TOKEN // highlight-end ``` **Let's break it down.** We start with the latest vLLM Docker image provided by [Mistral AI](https://docs.mistral.ai/self-deployment/vllm/). ```yaml mistral: image: ghcr.io/mistralai/mistral-src/vllm:latest ``` We specify that we require a GPU to run our application. ```yaml deploy: resources: reservations: cpus: '2.0' memory: 8192M devices: - capabilities: [\"gpu\"] ``` The Mistral model will be downloaded from HuggingFace. We need to have a HuggingFace Token to enable the installation, so we specify that we need to get the `HF_TOKEN` configuration value from Defang. Specifying the `HF_TOKEN` in the `environment` section of the service in the `compose.yml` file tells Defang to fetch the value from the encrypted configuration store. ```yaml environment: - HF_TOKEN ``` In this sample we also provide a simple UI to interact with the endpoint created by vLLM. The UI service is a Next.js application that runs on port 3000. :::tip You can see here how Defang's [networking](../concepts//networking.mdx) works. The `mistral` service is available at `http://mistral:8000`, exactly as it would be in a local `docker-compose` environment. 
::: ```yaml ui: restart: unless-stopped build: context: ui dockerfile: Dockerfile ports: - mode: ingress target: 3000 deploy: resources: reservations: memory: 256M healthcheck: test: [\"CMD\", \"wget\", \"--spider\", \"http://localhost:3000\"] interval: 10s timeout: 2s retries: 10 environment: // highlight-next-line - OPENAI_BASE_URL=http://mistral:8000/v1/ ``` Run the following command to deploy your service: ```bash defang compose up --provider=aws ```", "path": "/docs/tutorials/deploy-with-gpu" }, { - "id": 54, + "id": 91, "about": "Deploy to Google Cloud Platform (GCP), Pre-requisites, Step 1 - Navigate to your project directory, Step 2 - Authenticate Defang to use your GCP Account, Step 3 - Deploy, Step 4 - Inspect your deployment", - "text": "This tutorial will show you how to deploy your services to your own GCP project using Defang. * [A Dockerfile in your project](https://docs.docker.com/get-started/docker-concepts/building-images/writing-a-dockerfile/) * [A `compose.yaml` file in your project](https://docs.docker.com/compose/gettingstarted/) * [A Defang Account](/docs/concepts/authentication) * [The Defang CLI](/docs/getting-started#install-the-defang-cli) * [GCP Account Credentials](https://cloud.google.com/docs/authentication) Head to the folder where your project is located. ```bash $ cd path/to/your/project ``` After signing in to your GCP account, select an existing project or [create a new project](https://developers.google.com/workspace/guides/create-project). Make sure [billing is enabled](https://cloud.google.com/billing/docs/how-to/modify-project). Then, note down the project ID and set it as environment variable `GCP_PROJECT_ID`. Invoke the `defang compose up` CLI command with the `--provider=gcp` flag or set the `DEFANG_PROVIDER=gcp` environment variable. For example: ```bash $ defang compose up --provider=gcp ``` Defang will provision resources in your GCP account and deploy your services. 
You can inspect the resources created in your [GCP Dashboard](https://console.cloud.google.com/). *** For a deeper discussion of the Defang GCP Architecture, see our [GCP docs](/docs/providers/gcp).", + "text": "This tutorial will show you how to deploy your services to your own GCP project using Defang. * [A Dockerfile in your project](https://docs.docker.com/get-started/docker-concepts/building-images/writing-a-dockerfile/) * [A `compose.yaml` file in your project](https://docs.docker.com/compose/gettingstarted/) * [A Defang Account](/docs/concepts/authentication) * [The Defang CLI](/docs/getting-started#install-the-defang-cli) * [gcloud CL](https://cloud.google.com/sdk/docs/install) * [GCP Account Credentials](https://cloud.google.com/docs/authentication/set-up-adc-local-dev-environment) Head to the folder where your project is located. ```bash $ cd path/to/your/project ``` After signing in to your GCP account, select an existing project or [create a new project](https://developers.google.com/workspace/guides/create-project). Make sure [billing is enabled](https://cloud.google.com/billing/docs/how-to/modify-project). Then, note down the project ID and set it as environment variable `GCP_PROJECT_ID`. Invoke the `defang compose up` CLI command with the `--provider=gcp` flag or set the `DEFANG_PROVIDER=gcp` environment variable. For example: ```bash $ defang compose up --provider=gcp ``` Defang will provision resources in your GCP account and deploy your services. You can inspect the resources created in your [GCP Dashboard](https://console.cloud.google.com/). 
*** For a deeper discussion of the Defang GCP Architecture, see our [GCP docs](/docs/providers/gcp).", "path": "/docs/tutorials/deploy-to-gcp" }, { - "id": 55, + "id": 92, "about": "Deploy to Amazon Web Services (AWS), Pre-requisites, Step 1 - Navigate to your project directory, Step 2 - Authenticate Defang to use your AWS Account, Step 3 - Deploy, Step 4 - Inspect your deployment", "text": "This tutorial will show you how to deploy your services to your own AWS account using Defang. * [A Dockerfile in your project](https://docs.docker.com/get-started/docker-concepts/building-images/writing-a-dockerfile/) * [A `compose.yaml` file in your project](https://docs.docker.com/compose/gettingstarted/) * [A Defang Account](/docs/concepts/authentication) * [The Defang CLI](/docs/getting-started#install-the-defang-cli) * [AWS Account Credentials](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-authentication.html) Head to the folder where your project is located. ```bash $ cd path/to/your/project ``` There are many ways to authenticate your [AWS account](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-authentication.html). Once you've done that, Defang will look for your AWS credentials in your shell environment and expect to find one of the following credential sets: * the `AWS_PROFILE` environment variable * or, the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables :::tip If you have the AWS CLI installed (which is not required in order to use the Defang CLI), you can verify that you've authenticated to AWS by running `aws sts get-caller-identity` and see your account ID. ::: Invoke the `defang compose up` CLI command with the `--provider=aws` flag or set the `DEFANG_PROVIDER=aws` environment variable. For example: ```bash $ defang compose up --provider=aws ``` Defang will provision resources in your AWS account and deploy your services. You can inspect the resources created in your AWS Dashboard. 
*** For a deeper discussion of the Defang AWS Architecture, including a list of the resources we provision in your account, see our [AWS Provider docs](/docs/providers/aws).", "path": "/docs/tutorials/deploy-to-aws" }, { - "id": 56, + "id": 93, "about": "Using Codespaces and Gitpod, Using Codespaces With Defang, Step 1 - Clone the Defang Codespace Project, Step 2 - Create a Codespace, Step 3 - Open in VS Code Desktop, Step 4 - Run Defang Login, Step 5 - Verify Running Services, Using Gitpod With Defang, Step 1 - Clone the Defang Gitpod Workspace Project, Step 2 - Initialize a Gitpod Workspace, Step 3 - Lauch VS Code from Gitpod, Step 4 - Run Defang Login, Step 5 - Verify Running Services", "text": "This tutorial will guide you to set up Defang in both GitHub Codespaces and Gitpod. Start by cloning the [Defang GitHub-Codespace](https://github.com/DefangLabs/github-codespace) repo and pushing it to your own account. This repository is configured with a Codespace that has Defang pre-installed. Once you've pushed to your own GitHub repo, you'll be able to create a Codespace by clicking the Code button, selecting the Codespaces tab, and clicking the + icon. This will set up a development environment with Defang already installed, which might take a few minutes. ![Create Codespace button screenshot](/img/codespace-tutorial/new-codespace.png) For the `defang login` command to work correctly, you must open the Codespace in VS Code desktop. This is required because the login process is designed to run on localhost. ![Open in vs code desktop button screenshot](/img/codespace-tutorial/desktop.png) Within a VS Code desktop terminal, execute the following command. ```bash defang login ``` Although it may initially refuse to connect on your localhost, going back will show a \"successfully logged in\" message, confirming that you're logged into Defang. Now that you're logged in, you can use Defang commands. 
You can test that everything is working properly by running `defang ls` to list your running services. Start by cloning the [Defang Gitpod-Workspace](https://github.com/DefangLabs/gitpod-workspace) repo and pushing it to your own GitHub, GitLab, or BitBucket account. This repository includes a Workspace configuration that pre-installs Defang. Navigate `https://gitpod.io/#` to create your new workspace. In the repository, we have a YAML file indicating that we are using a pre-built Dockerfile which installs Defang CLI for you. Open VS Code from Gitpod, you will likely need to have the Gitpod VS Code extension installed. ![Open in vs code desktop button screenshot](/img/codespace-tutorial/gitpod-desktop.png) ![Screenshot of Gitpod extension](/img/codespace-tutorial/gitpod-ext.png) Within a VS Code desktop terminal, execute the following command. ```bash defang login ``` Now that you're logged in, you can use Defang commands. You can test that everything is working properly by running `defang ls` to list your running services.", "path": "/docs/tutorials/using-codespaces-gitpod" }, { - "id": 57, + "id": 94, + "about": "Setting Up Your GCP Account, Step 1: Create Your GCP Account, Step 2: Create a GCP Project, Step 3: Create Your Billing Account, Step 4: Link Your Project to the Billing Account, Step 5: Set Up Your GCP CLI", + "text": "Follow these steps to set up your Google Cloud Platform (GCP) account for deploying applications with Defang. --- Start by creating a Google Cloud account using the [GCP account creation flow](https://cloud.google.com/). If you already have an account skip to the next step.
\"Google
Google GCP sign up page
--- To deploy apps using Defang, you need a GCP project. Projects help organize related resources such as services, IAM policies, quotas, and billing. If you already have a project, skip to [Step 3](#step-3-create-your-billing-account). To create a new project: 1. Visit the [GCP Console](https://console.cloud.google.com/). 2. Click the project selector button (it may say **\"Select a project\"** or display a previous project name).
\"GCP
Select project button to open the project dialog
3. Follow the prompts to create a new project. :::info Take note of your **Project ID** — you’ll need it later when setting the `GCP_PROJECT_ID` environment variable. :::
\"GCP
Create a new project in the dialog
--- Billing accounts let Google allocate costs to specific entities or users. You’ll need one to deploy services. 1. In the [GCP Console](https://console.cloud.google.com/), open the navigation menu. 2. Select **Billing**.
\"Billing
Billing menu option
3. In the billing side panel, select **Manage billing accounts**, then click **Create account**. 4. Follow the steps to create a billing account. A credit card is required.
\"Create
Billing management page
--- Once your project and billing account are created, link them together: 1. In the GCP Console, select your project from the project selector. 2. Open the **Billing** section again from the left sidebar. 3. Click **Link a billing account**.
\"Link
Link billing account dialog
4. Choose the billing account to link.
\"Billing
Select a billing account to link
--- To complete your setup, install the **gcloud CLI** by following the [installation guide](https://cloud.google.com/sdk/docs/install). After installation, authenticate with: ``` gcloud auth application-default login ``` This will generate your **application default credentials** used during deployment. --- Now you're ready to deploy! See the [Deploy to GCP tutorial](/docs/tutorials/deploy-to-gcp.mdx) for next steps.", + "path": "/docs/tutorials/setting_up_your_gcp_account" + }, + { + "id": 95, "about": "---", "text": "import { URLProvider, URLEncode } from \"../../src/components/OneClick\";\n\n\n\n# Adding Custom 1-Click Deploy to Your App\n\nThis tutorial will show you how to add a 1-Click Deploy link so other people can easily deploy your app to the Defang Playground and eventually to their own cloud accounts.\n\nThe link is often placed as a button in the `README.md` file of your project repository, and is the easiest way to allow anyone to deploy your app. \n\n:::info\nIf you are trying to use 1-Click Deploy with one of our [samples](https://defang.io/samples/), we have a separate tutorial on [how to use 1-Click Deploy](/docs/tutorials/using-one-click-deploy). \n:::\n\n### How 1-Click Deploy Works\nA 1-Click Deploy link points to a special URL. Clicking this link will prompt the user to create a new GitHub repo with a clone of your project. This new repo will contain a GitHub Actions workflow which will automatically deploy the project to Defang. \n\n## Pre-requisites\n\n- A [GitHub](https://github.com/) repository for your project\n\n## Step 1 - Prepare Your App\n\nYou will need a `compose.yaml` file in the root folder of your project repository to describe the services which will be deployed. 
Learn more about writing [Compose files](/docs/concepts/compose#example-of-a-compose-file).\n\n## Step 2 - Copy the GitHub Workflow\n \nIn your project, add a new folder named `.github`, then add a folder named `workflows` inside it, and copy the entire GitHub Workflow `deploy.yaml` file from [here](https://github.com/DefangLabs/samples/blob/main/starter-sample/.github/workflows/deploy.yaml) into the `workflows` folder. \n \nIf your app requires [configuration](/docs/concepts/configuration) (e.g. API keys, passwords, etc.), learn more about [managing config variables with the Defang GitHub Action](https://github.com/DefangLabs/defang-github-action?tab=readme-ov-file#managing-config-values). \n\nYou should have a `.yaml` file similar to the one below:\n```yaml\nname: Deploy\n\non:\n push:\n branches:\n - main\n\njobs:\n deploy:\n environment: playground\n runs-on: ubuntu-latest\n permissions:\n contents: read\n id-token: write\n\n steps:\n - name: Checkout Repo\n uses: actions/checkout@v4\n\n - name: Deploy\n uses: DefangLabs/defang-github-action@v1.1.3\n```\n\n## Step 3 - Prepare Your Repository\n\nTo support 1-Click Deploy, your repository must be a [Template Repository](https://docs.github.com/en/repositories/creating-and-managing-repositories/creating-a-repository-from-a-template). In order to make your repository into a Template Repository, complete the following steps:\n1. Go to the repository's \"Settings\" near the right on the top bar. \n2. Select the \"Template repository\" box to turn your repo into a template repo, as seen below:\n\n![select-template-repo-box](/img/custom-one-click-tutorial/select-template-repo-box.png)\n\n3. Go back to your repository's homepage.\n4. Click the dropdown menu next to the green \"Use this template\" button near the top right corner, then click \"Create a new repository\":\n\n![use-this-template](/img/custom-one-click-tutorial/use-this-template.png)\n\n5. 
You will be taken to a page similar to the one shown below.\n\n![create-repo](/img/custom-one-click-tutorial/create-repo.png)\n\nNow stay on this page and continue to the next step.\n\n## Step 4 - Get the Encoded URL\n\nYou will need the encoded version of the URL of the page from the previous step. This will be used as a redirect for the 1-Click Deploy link. \n\n1. Copy the URL of the page you are on ( \"Create a new repository\" page sourced from your template repository). It should look something like the following:\n```\nhttps://github.com/new?template_name=&template_owner=\n```\n2. You need to URL encode your url for the next step. For example, the url above would be encoded as:\n\n```\nhttps%3A%2F%2Fgithub.com%2Fnew%3Ftemplate_name%3D%26template_owner%3D\n```\n\nYou can just paste your url in here to get the encoded version:\n\n\n\n## Step 5 - Create the 1-Click Deploy Link\n\nYou will need to create a 1-Click Deploy link with the following format: `https://portal.defang.dev/redirect?url=` + your encoded URL. This ensures that the user can get [logged in](/docs/concepts/authentication/) to Defang before they get redirected to clone your app for deployment. \n\n:::tip\nOptionally, you can add `&name=` to the end of the link if you'd like to suggest a name for the user to use for their deployment.\n:::\n\nThe finished link should look similar to the one below:\n```\nhttps://portal.defang.dev/redirect?url=https%3A%2F%2Fgithub.com%2Fnew%3Ftemplate_name%3D%26template_owner%3D&name=\n```\n\nNow you have a link for anyone to deploy your app to Defang, with just 1 click. 
\n\n## Step 6 - Use the Link\nFor example, you can add it as a link in a Markdown file:\n```\n[1-Click Deploy Link](https://portal.defang.dev/redirect?url=&name=)\n```\nOr perhaps you can add it to a button with your own styling:\n```\n[![1-click-deploy-button](https://defang.io/deploy-with-defang.png)](https://portal.defang.dev/redirect?url=&name=)\n```\n\n", "path": "/docs/tutorials/adding-custom-one-click-deploy" }, { - "id": 58, + "id": 96, "about": "Monitoring Your Services, Status, Logs, Tailing Live Logs For All Services, Tailing Logs Since a Specific Time, Tailing Logs For a Service, Build Time Logs", "text": "This tutorial will show you how to monitor your service status and logs in Defang. When deploying to Playground, you can monitor your service status from the [Defang Portal](https://portal.defang.dev). When deploying to your own cloud, the primary way to monitor your services is through your cloud provider's dashboard. However, Defang does provide some tools for monitoring your service logs. When deploying to Playground, your service's logs may be viewed in the [Defang Portal](https://portal.defang.dev). By default when deploying, including to your own cloud, all output (stdout and stderr) from your app is logged and accessible via the [Defang CLI](/docs/getting-started#install-the-defang-cli). You can view these logs in real-time or for a time in the past. You can view logs for all your services, one service, or even one specific deployment of a service. ```bash $ defang tail ``` ```bash $ defang tail --since 1h ``` ```bash $ defang tail --name service-name ``` ```bash $ defang logs --type=build ``` All of the above flags can be combined to get the exact logs you need. See the CLI reference for [`defang tail`](/docs/cli/defang_tail) for more information. :::info * To learn more about observability in Defang, check out the [Observability page](../concepts/observability.md). 
* Note that the Defang Portal only displays services deployed to Defang Playground. :::", "path": "/docs/tutorials/monitoring-your-services" }, { - "id": 59, + "id": 97, "about": "Configure Environment Variables, Pre-requisites, Step 1 - Go to your `compose.yaml` file, Step 2 - Set the actual value in the Defang CLI, Editing a config value, Removing a config value, Step 3 - Deploy", "text": "This tutorial will show you how to configure sensitive environment variables in Defang. * [A `compose.yaml` file in your project](https://docs.docker.com/compose/gettingstarted/) * [A Defang Account](/docs/concepts/authentication) * [The Defang CLI](/docs/getting-started#install-the-defang-cli) :::info If you are using [Pulumi](/docs/concepts/pulumi) instead of Compose files to define your services, please see [Using Config With Pulumi](/docs/concepts/configuration#using-config-with-pulumi) instead. ::: In your Compose file, you can define a sensitive config variable for your service by leaving it as a **blank or `null` value**. Defang will recognize it as a sensitive value. In the example below, let's define `API_KEY` as an environment variable. ```yaml services: service1: image: image1:latest environment: - API_KEY ``` The type of notation shown above is called *list notation*. Alternatively, you can use *map notation*, which is also acceptable: ```yaml services: service1: image: image1:latest environment: API_KEY: ``` To store the actual (sensitive) value of the variable, open up a terminal and type the command: ```bash defang config set API_KEY=actualvalue ``` Remember to replace `API_KEY` with your variable name and `actualvalue` with your actual value. :::tip You can view all the config variables you are storing in Defang by doing: `defang config ls`. 
::: To edit a value, you can run the command again with an updated value to overwrite the current value: ```bash defang config set API_KEY=newvalue ``` To remove a value, run the command: ```bash defang config rm API_KEY ``` :::tip Remember to update your Compose file if you remove an environment variable. ::: ```bash defang compose up ``` --- For a deeper discussion on how configuration works in Defang, see our [Configuration docs](/docs/concepts/configuration).", "path": "/docs/tutorials/configure-environment-variables" }, { - "id": 60, + "id": 98, "about": "Generate Project Outlines With AI, Step 1 - Use the `defang generate` command in the CLI, Step 2 - Review the Code, Step 3 - Build and Deploy", "text": "This tutorial will show you how to use Defang's AI agent to [generate](/docs/concepts/generate) a project outline, and then deploy that project to the cloud using Defang. Here you can describe what you would like the service to do and the [CLI](/docs/getting-started#install-the-defang-cli) will then [generate](/docs/concepts/generate) a project outline with all the files required to make it deployable with Defang. In this tutorial, we'll use the following prompt to describe our service: \"A basic service with 2 REST endpoints. The default endpoint will be for health check and should return a JSON object like this: `{ \"status\": \"OK\" }`. The /echo endpoint will echo back all request parameters in the response.\" ```text defang generate ? Choose the language you'd like to use: [Use arrows to move, type to filter, ? for more help] > Nodejs Golang Python ? Choose a sample service: Generate with AI ? Please describe the service you'd like to build: [? for help] A basic service with 2 REST endpoints. The default endpoint will be for health check and should return a JSON object like this: { \"status\": \"OK\" }. The /echo endpoint will echo back all request parameters in the response. ? What folder would you like to create the service in? [? 
for help] (service1) project1 ``` This will [generate](/docs/concepts/generate) the different files required to start your project based on your prompt and the language selected. Change into the new project folder: ```shell cd project1 ``` You can open the files in a code editor to review or make changes as needed before deploying the service. ```bash defang compose up ```", "path": "/docs/tutorials/generate-new-code-using-ai" }, { - "id": 61, + "id": 99, "about": "Deploy to Playground, Pre-requisites, Step 1 - Navigate to your project directory, Step 2 - Deploy", "text": "This tutorial will show you how to deploy your project to the free [Defang Playground](/docs/concepts/defang-playground). * [A Dockerfile in your project](https://docs.docker.com/get-started/docker-concepts/building-images/writing-a-dockerfile/) * [A `compose.yaml` file in your project](https://docs.docker.com/compose/gettingstarted/) * [A Defang Account](/docs/concepts/authentication) * [The Defang CLI](/docs/getting-started#install-the-defang-cli) Head to the folder where your project is located. ```bash $ cd path/to/your/project ``` ``` $ defang compose up ``` If you have not used Defang before, you'll be prompted to log in. ``` ! Please log in to continue. Please visit http://127.0.0.1:49154 and log in. (Right click the URL or press ENTER to open browser) ``` :::info To learn more about how authentication works in defang, check out the [authenticating page](/docs/concepts/authentication). ::: When you do this, you should see something similar to the output below: ``` * Uploading build context for app * Deploying service app * Monitor your services' status in the defang portal - https://portal.defang.dev/service/app * Tailing logs for deployment ID o59k89vk3qc8 ; press Ctrl+C to detach: * Press V to toggle verbose mode 2024-09-19T10:50:53.572443-07:00 cd Update started for stack jordanstephens-prod1 2024-09-19T10:51:05.536299-07:00 cd Update succeeded in 11.99769745s ; provisioning... 
2024-09-19T10:51:39.419693-07:00 app Server running at http://0.0.0.0:3000/ * Service app is in state DEPLOYMENT_COMPLETED and will be available at: - https://jordanstephens-app--3000.prod1.defang.dev * Done. ``` Now we can go to [https://portal.defang.dev/service/app](https://portal.defang.dev/service/app) to see our service listed in the Defang portal. ![screenshot of the defang portal](/img/getting-started-portal.png)", "path": "/docs/tutorials/deploy-to-playground" }, { - "id": 62, + "id": 100, "about": "Updating Your Services, Updating a Service, Deploying New Services or Removing Existing Services, Deployment Modes, Development Mode (Default), Production Mode (Zero-downtime Updates), Deleting your Application", - "text": "This tutorial will show you how to update your services in Defang. To update your app (for example, updating the base image of your container, or making changes to your code) all you have to do is run the `defang compose up` command and it will build and deploy a new version. If you are using [Compose files](../concepts/compose.md) to define your services, you can add/remove services, make changes to code, etc. When you run `defang compose up`, the update will be diffed against the current state and any necessary changes will be applied to make the current state match the desired state. Defang offers multiple [deployment modes](/docs/concepts/deployment-modes). You can specify which mode you would like to be used when you deploy with the `--mode` flag on the CLI. The default mode is `development`. You may also specify `staging` or `production`. ```shell $ defang compose up --mode=production ``` The default deployment mode is `development`. This is the In this mode, the existing services will be deprovisioned before your new service will be spun up. This means that there will be a short downtime while the new service is being provisioned. If you are running in `production` mode, the update will be done with zero downtime. 
Your current version of the service will keep running and handling traffic while the new version is being built and deployed. Only after the new version passes the health checks and accepts traffic will the older version be stopped. To delete your app, use `defang compose down` in your compose file working directory, or use the `--project-name` flag. In some cases, particularly on the AWS platform, additional actions may be required. Specifically load balancers may have Deletion Protection on. To turn this off in the AWS Console for EC2 Load Balancers, follow these steps: 1.\tSelect the load balancer corresponding to the app’s name. 2.\tGo to the Attributes tab. 3.\tClick the Edit button. 4.\tLocate Deletion Protection and disable it. :::info For more information on Deployment Modes, see the [Deployment Modes](/docs/concepts/deployment-modes) concept documentation. :::", + "text": "This tutorial will show you how to update your services in Defang. To update your app (for example, updating the base image of your container, or making changes to your code) all you have to do is run the `defang compose up` command and it will build and deploy a new version. If you are using [Compose files](../concepts/compose.md) to define your services, you can add/remove services, make changes to code, etc. When you run `defang compose up`, the update will be diffed against the current state and any necessary changes will be applied to make the current state match the desired state. Defang offers multiple [deployment modes](/docs/concepts/deployment-modes). You can specify which mode you would like to be used when you deploy with the `--mode` flag on the CLI. The default mode is `affordable`. You may also specify `balanced` or `high_availability`. ```shell $ defang compose up --mode=production ``` The default deployment mode is `development`. This is the In this mode, the existing services will be deprovisioned before your new service will be spun up. 
This means that there will be a short downtime while the new service is being provisioned. If you are running in `production` mode, the update will be done with zero downtime. Your current version of the service will keep running and handling traffic while the new version is being built and deployed. Only after the new version passes the health checks and accepts traffic will the older version be stopped. To delete your app, use `defang compose down` in your compose file working directory, or use the `--project-name` flag. In some cases, particularly on the AWS platform, additional actions may be required. Specifically load balancers may have Deletion Protection on. To turn this off in the AWS Console for EC2 Load Balancers, follow these steps: 1.\tSelect the load balancer corresponding to the app’s name. 2.\tGo to the Attributes tab. 3.\tClick the Edit button. 4.\tLocate Deletion Protection and disable it. :::info For more information on Deployment Modes, see the [Deployment Modes](/docs/concepts/deployment-modes) concept documentation. :::", "path": "/docs/tutorials/updating-your-services" }, { - "id": 63, - "about": "Scaling Your Services, Scaling Resource Reservations, Scaling with Replicas", - "text": "This tutorial will show you how to scale your services with Defang. There are two primary ways to scale a service. The first way is to increase the resources allocated to a service. For example, giving a service more CPUs or memory. The second way is to deploy multiple instances of a service. This is called scaling with replicas. Defang makes it easy to do both. The _Compose Specification_, which is used by Defang, includes a [`deploy` section](https://github.com/compose-spec/compose-spec/blob/main/deploy.md) which allows you to configure the deployment configuration for a service. This includes your service's resource requirements and the number of replicas of a service should be deployed. 
In order to scale a service's resource reservations, you will need to update the `deploy` section associated with your service in your application's `compose.yaml` file. Use the [`resources`](https://github.com/compose-spec/compose-spec/blob/main/deploy.md#resources) section to specify the resource reservation requirements. These are the minimum resources which must be available for the platform to provision your service. You may end up with more resources than you requested, but you will never be allocated less. For example, if my app needs 2 CPUs and 512MB of memory, I would update the `compose.yaml` file like this: ```yaml services: my_service: image: my_app:latest deploy: resources: reservations: cpus: '2' memory: '512M' ``` The minimum resources which can be reserved: | Resource | Minimum | |----------|---------| | CPUs | 0.5 | | Memory | 512M | :::info Note that the `memory` field must be specified as a [\"byte value string\"](https://github.com/compose-spec/compose-spec/blob/main/11-extension.md#specifying-byte-values) using the `{amount}{byte unit}` format. The supported units are `b` (bytes), `k` or `kb` (kilobytes), `m` or `mb` (megabytes) and `g` or `gb` (gigabytes). ::: In order to scale a service's replica count, you will need to update the `deploy` section associated with your service in your application's `compose.yaml` file. Use the [`replicas`](https://github.com/compose-spec/compose-spec/blob/main/deploy.md#replicas) section to specify the number of containers which should be running at any given time. For example, if I want to run 3 instances of my app, I would update the `compose.yaml` file like this: ```yaml services: my_service: image: my_app:latest deploy: replicas: 3 ```", + "id": 101, + "about": "Scaling Your Services, Scaling Resource Reservations, Scaling with Replicas, Autoscaling Your Services, Enabling Autoscaling", + "text": "This tutorial will show you how to scale your services with Defang. 
There are two primary ways to scale a service. The first way is to increase the resources allocated to a service. For example, giving a service more CPUs or memory. The second way is to deploy multiple instances of a service. This is called scaling with replicas. Defang makes it easy to do both. The _Compose Specification_, which is used by Defang, includes a [`deploy` section](https://github.com/compose-spec/compose-spec/blob/main/deploy.md) which allows you to configure the deployment configuration for a service. This includes your service's resource requirements and the number of replicas of a service should be deployed. In order to scale a service's resource reservations, you will need to update the `deploy` section associated with your service in your application's `compose.yaml` file. Use the [`resources`](https://github.com/compose-spec/compose-spec/blob/main/deploy.md#resources) section to specify the resource reservation requirements. These are the minimum resources which must be available for the platform to provision your service. You may end up with more resources than you requested, but you will never be allocated less. For example, if my app needs 2 CPUs and 512MB of memory, I would update the `compose.yaml` file like this: ```yaml services: my_service: image: my_app:latest deploy: resources: reservations: cpus: \"2\" memory: \"512M\" ``` The minimum resources which can be reserved: | Resource | Minimum | | -------- | ------- | | CPUs | 0.5 | | Memory | 512M | :::info Note that the `memory` field must be specified as a [\"byte value string\"](https://github.com/compose-spec/compose-spec/blob/main/11-extension.md#specifying-byte-values) using the `{amount}{byte unit}` format. The supported units are `b` (bytes), `k` or `kb` (kilobytes), `m` or `mb` (megabytes) and `g` or `gb` (gigabytes). ::: In order to scale a service's replica count, you will need to update the `deploy` section associated with your service in your application's `compose.yaml` file. 
Use the [`replicas`](https://github.com/compose-spec/compose-spec/blob/main/deploy.md#replicas) section to specify the number of containers which should be running at any given time. For example, if I want to run 3 instances of my app, I would update the `compose.yaml` file like this: ```yaml services: my_service: image: my_app:latest deploy: replicas: 3 ``` Autoscaling allows your services to automatically adjust the number of replicas based on CPU usage — helping you scale up during traffic spikes and scale down during quieter periods. > **Note:** Autoscaling is only available to **Pro** tier or higher users. To enable autoscaling for a service, add the `x-defang-autoscaling: true` extension under the service definition in your `compose.yaml` file and remove the _**replicas**_ field in your _**deploy**_ mapping, if present. Autoscaling is available in staging and production [deployments modes](/docs/concepts/deployments#deployment_modes) only. Example: ```yaml services: web: image: myorg/web:latest ports: - 80:80 x-defang-autoscaling: true ``` Once deployed, your services' CPU usage is monitored for how much load it is handling, sustained high loads will result in more replicas being started. Requirements - BYOC, your own cloud platform account. - You must be on the Pro or higher plan to use autoscaling. ([Defang plans](https://defang.io/#pricing)) - _**replicas**_ must **NOT** be defined - Only staging and production deployment modes supported. ([Deployment modes](/docs/concepts/deployment-modes)) - The service must be stateless or able to run in multiple instances. ([Scaling](/docs/concepts/scaling)) Best Practices - Design your services to be horizontally scalable. ([12 Factor App](https://12factor.net/processes)) - Use shared or external storage if your service writes data. (e.g. 
Postgres or Redis [managed services](/docs/concepts/managed-storage) )", "path": "/docs/tutorials/scaling-your-services" }, { - "id": 64, + "id": 102, "about": "How to Use Your Own Domain With AWS, Prerequisites, Step 1 - Set up a Hosted Zone in AWS Route 53, Step 2 - Configure your DNS settings in your domain registrar, Step 3 - Set up Your Compose File, Step 4 - Deploy", "text": "This tutorial will show you how to set up and use your own domain when deploying to AWS using Defang. * [A Defang Account](/docs/concepts/authentication) * [The Defang CLI](/docs/getting-started#install-the-defang-cli) * [AWS Account Credentials](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-authentication.html) :::info **If you purchased your domain with AWS, you can skip this step.** ::: For Defang to be able to manage your domain, you need to create a [public hosted zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingHostedZone.html) in AWS Route 53. It should list four AWS nameservers and should look like this: \"Hosted :::info **If you purchased your domain with AWS, you can skip this step.** ::: You'll need to create NS records in your domain registrar that point to the AWS Route 53 name servers which we got in the previous step. For example, in CloudFlare, the NS records would look like this: ![CloudFlare NS Records Screenshot](/img/domains-tutorial/ns-records.png) In your Compose file, specify the domain name you want to use, for example: ```yaml services: web: domainname: nextjs.defang.chewydemos.com build: context: ./web dockerfile: Dockerfile ports: target: 3000 mode: ingress ``` Run the following command to deploy your service: ```bash defang compose up ``` This will deploy your service and set up the necessary DNS records in Route 53 as seen in the screenshot below as well as provision SSL certificates. You can now access your service at the domain you specified in your Compose file. 
\"Route *** For a deeper discussion of how to use a custom domain with Defang, see our [Domain](/docs/concepts/domains) concept docs.", "path": "/docs/tutorials/use-your-own-domain-name" }, { - "id": 65, + "id": 103, + "about": "Estimating AWS Deployment Costs, Clone the sample, Open the compose file, Generate an estimate, Deploying your project", + "text": "Defang enables you to estimate the cost of deploying and running your project without needing to create an account with your cloud provider. This tutorial will walk through estimating the costs of deploying the [django-postgres](https://github.com/DefangLabs/samples/tree/main/samples/django-postgres) to AWS. ``` defang generate # select `django-postgres` (or any other sample) from the list cd django-postgres # or the name of your project's working directory ``` Let's take a quick look at the compose file. There are two services: `db` and `django`. Defang can generate a cost estimate of running the project described by this compose file in the cloud. Notice that each service has a [`deploy.resources.reservations`](/docs/concepts/compose#deploy) property which describes the service's resource requirements. These numbers will have a significant impact on the cost of running these services in the cloud. The estimation defang generates for you will be sensitive to these numbers. 
```yaml services: db: restart: unless-stopped image: postgres:16 x-defang-postgres: true environment: - POSTGRES_DB=django - POSTGRES_USER=django - POSTGRES_PASSWORD ports: - mode: host target: 5432 published: 5432 healthcheck: test: [\"CMD\", \"python3\", \"-c\", \"import sys, urllib.request; urllib.request.urlopen(sys.argv[1]).read()\", \"http://localhost:8000/\"] deploy: resources: reservations: cpus: '0.5' memory: 256M django: restart: unless-stopped build: ./app ports: - mode: ingress target: 8000 published: 8000 environment: - DB_HOST=db - DEBUG=False - POSTGRES_USER=django - POSTGRES_DB=django - POSTGRES_PASSWORD - SECRET_KEY - ALLOWED_HOSTS depends_on: - db deploy: resources: reservations: cpus: '0.5' memory: 256M ``` Generating an estimate is easy. All you need to do is run ``` defang estimate ``` Defang can deploy a project according to different [deployment modes](/docs/concepts/deployment-modes). By default, defang will estimate the cost deploying with the `affordable` mode. If you would like to increase your application's resiliency, you can deploy will the `balanced` or `high_availability` modes. Defang can estimate the cost of deploying using any of these modes. ``` defang estimate --provider aws [--mode affordable|balanced|high_availability] ``` Here is an example of the output you would see if you estimated the cost deploying the [django-postgres](https://github.com/DefangLabs/samples/tree/main/samples/django-postgres) sample using the `balanced` [deployment mode](/docs/concepts/deployment-modes). ``` defang estimate --provider=aws --mode=balanced * Packaging the project files for django at /Users/defang/samples/samples/django-postgres/app * Generating deployment preview * Preparing estimate Estimate for Deployment Mode: BALANCED This mode strikes a balance between cost and availability. Your application will be deployed with spot instances. Databases will be provisioned using resources optimized for production. 
Services in the \"internal\" network will be deployed to a private subnet with a NAT gateway for outbound internet access. Cost Quantity Service Description $16.43 730 Hours (shared) AWSELB USW2-LoadBalancerUsage $32.85 730 Hours (shared) AmazonEC2 USW2-NatGateway-Hours $25.00 100 %Utilized/mo db AmazonRDS USW2-InstanceUsage:db.r5.large $1.62 14600 GB-Hours django AmazonECS USW2-Fargate-EphemeralStorage-GB-Hours (20 GB * 730 hours) $1.62 365 GB-Hours django AmazonECS USW2-Fargate-GB-Hours (0.50 GB * 730 hours) -$1.14 365 GB-Hours django AmazonECS USW2-Fargate-GB-Hours-SpotDiscount (Estimated @ 70%) $7.39 182.50 vCPU-Hours django AmazonECS USW2-Fargate-vCPU-Hours:perCPU (0.25 vCPU * 730 hours) -$5.17 182.50 vCPU-Hours django AmazonECS USW2-Fargate-vCPU-Hours:perCPU-SpotDiscount (Estimated @ 70%) Estimated Monthly Cost: $78.60 (+ usage) Estimate does not include taxes or Discount Programs. To estimate other modes, use defang estimate --mode=affordable|balanced|high_availability For help with warnings, check our FAQ at https://s.defang.io/warnings ``` This estimate will include a line item for the most significant monthly costs associated with your deployment. Each line item will be associated with a compose service if possible. Some AWS resources will be shared between multiple compose services. Now that you have estimated the costs associated with your project. You are ready to deploy to AWS. ``` defang compose up --provider aws --mode affordable|balanced|high_availability ```", + "path": "/docs/tutorials/estimating-aws-deployment-costs" + }, + { + "id": 104, "about": "Deploy to Your Own Cloud, Choose your cloud", "text": "This tutorial will show you how to deploy your services to your own cloud account using Defang. :::tip Defang makes it easier to deploy to any cloud—in your *own* cloud accounts. We refer to this as [bring-your-own-cloud (BYOC)](/docs/concepts/defang-byoc). 
If you aren't ready to deploy to your own cloud account, you can use the [Defang Playground](/docs/concepts/defang-playground) to get a feel for how Defang works for free. ::: A good starting point is to choose which cloud provider you want to deploy to. Defang supports the following cloud providers: * [AWS](/docs/tutorials/deploy-to-aws) * [DigitalOcean](/docs/tutorials/deploy-to-digitalocean) * [GCP](/docs/tutorials/deploy-to-gcp) The above will link to a tutorial page for that cloud. If you run into any problems, please note that we have an [FAQ section](/docs/category/faq) for your convenience.", "path": "/docs/tutorials/deploy-to-your-cloud" }, { - "id": 66, + "id": 105, + "about": "Deploy OpenAI Apps to GCP Vertex AI, Add an LLM Service to Your Compose File, Notes:, Redirect Application Traffic, Selecting a Model, Complete Example Compose File, Environment Variable Matrix", + "text": "Let's assume you have an application that uses an OpenAI client library and you want to deploy it to the cloud using **GCP Vertex AI**. This tutorial shows you how **Defang** makes it easy. :::info You must [configure GCP Vertex AI model access](https://cloud.google.com/vertex-ai/generative-ai/docs/control-model-access) for each model you intend to use in your GCP account. ::: Suppose you start with a `compose.yaml` file with one `app` service, like this: ```yaml services: app: build: context: . ports: - 3000:3000 environment: OPENAI_API_KEY: healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:3000/\"] ``` --- You can use Vertex AI without changing your `app` code by introducing a new [`defangio/openai-access-gateway`](https://github.com/DefangLabs/openai-access-gateway) service. We'll call the new service `llm`. This new service will act as a proxy between your application and Vertex AI, and will transparently handle converting your OpenAI requests into Vertex AI requests and Vertex AI responses into OpenAI responses. 
This allows you to use Vertex AI with your existing OpenAI client SDK. ```diff + llm: + image: defangio/openai-access-gateway + x-defang-llm: true + ports: + - target: 80 + published: 80 + mode: host + environment: + - OPENAI_API_KEY + - GCP_PROJECT_ID + - REGION ``` - The container image is based on [aws-samples/bedrock-access-gateway](https://github.com/aws-samples/bedrock-access-gateway), with enhancements. - `x-defang-llm: true` signals to **Defang** that this service should be configured to use target platform AI services. - New environment variables: - `REGION` is the zone where the services runs (e.g. `us-central1`) - `GCP_PROJECT_ID` is your project to deploy to (e.g. `my-project-456789`) :::tip **OpenAI Key** You no longer need your original OpenAI API Key. We recommend generating a random secret for authentication with the gateway: ```bash defang config set OPENAI_API_KEY --random ``` ::: --- Modify your `app` service to send API calls to the `openai-access-gateway`: ```diff services: app: ports: - 3000:3000 environment: OPENAI_API_KEY: + OPENAI_BASE_URL: \"http://llm/api/v1\" healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:3000/\"] ``` Now, all OpenAI traffic will be routed through your gateway service and onto GCP Vertex AI. --- You should configure your application to specify the model you want to use. ```diff services: app: ports: - 3000:3000 environment: OPENAI_API_KEY: OPENAI_BASE_URL: \"http://llm/api/v1\" + MODEL: \"google/gemini-2.5-pro-preview-03-25\" # for Vertex AI healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:3000/\"] ``` Choose the correct `MODEL` depending on which cloud provider you are using. Ensure you have the necessary permissions to access the model you intend to use. 
To do this, you can check your [AWS Bedrock model access](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access-modify.html) or [GCP Vertex AI model access](https://cloud.google.com/vertex-ai/generative-ai/docs/control-model-access). :::info **Choosing the Right Model** - For **GCP Vertex AI**, use a full model path (e.g., `google/gemini-2.5-pro-preview-03-25`). [See available Vertex AI models](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/call-vertex-using-openai-library#client-setup). ::: Alternatively, Defang supports [model mapping](/docs/concepts/managed-llms/openai-access-gateway/#model-mapping) through the [openai-access-gateway](https://github.com/DefangLabs/openai-access-gateway). This takes a model with a Docker naming convention (e.g. `ai/llama3.3`) and maps it to the closest matching one on the target platform. If no such match can be found, it can fallback onto a known existing model (e.g. `ai/mistral`). These environment variables are `USE_MODEL_MAPPING` (default to true) and `FALLBACK_MODEL` (no default), respectively. ```yaml services: app: build: context: . 
ports: - 3000:3000 environment: OPENAI_API_KEY: OPENAI_BASE_URL: \"http://llm/api/v1\" MODEL: \"google/gemini-2.5-pro-preview-03-25\" healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:3000/\"] llm: image: defangio/openai-access-gateway x-defang-llm: true ports: - target: 80 published: 80 mode: host environment: - OPENAI_API_KEY - GCP_PROJECT_ID - REGION ``` --- | Variable | GCP Vertex AI | |--------------------|---------------| | `GCP_PROJECT_ID` | Required | | `REGION` | Required | | `MODEL` | Vertex model ID or Docker model name, for example `publishers/meta/models/llama-3.3-70b-instruct-maas` or `ai/llama3.3` | --- You now have a single app that can: - Talk to **GCP Vertex AI** - Use the same OpenAI-compatible client code - Easily switch between models or cloud providers by changing a few environment variables", + "path": "/docs/tutorials/deploy-openai-apps/gcp-vertex" + }, + { + "id": 106, + "about": "Deploy OpenAI Apps on Managed LLMs", + "text": "Defang currently supports using Managed LLMs on AWS Bedrock and GCP Vertex AI. Follow the link below for your specific platform. - [AWS Bedrock](/docs/tutorials/deploy-openai-apps/aws-bedrock/) - [GCP Vertex AI](/docs/tutorials/deploy-openai-apps/gcp-vertex/)", + "path": "/docs/tutorials/deploy-openai-apps/deploy-openai-apps" + }, + { + "id": 107, + "about": "Deploy OpenAI Apps to AWS Bedrock, Add an LLM Service to Your Compose File, Notes:, Redirect Application Traffic, Selecting a Model, Complete Example Compose File, Environment Variable Matrix", + "text": "Let's assume you have an app that uses an OpenAI client library and you want to deploy it to the cloud on **AWS Bedrock**. This tutorial shows you how **Defang** makes it easy. :::info You must [configure AWS Bedrock model access](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access-modify.html) for each model you intend to use in your AWS account. 
::: Suppose you start with a `compose.yaml` file with one `app` service, like this: ```yaml services: app: build: context: . ports: - 3000:3000 environment: OPENAI_API_KEY: healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:3000/\"] ``` --- You can use AWS Bedrock without changing your `app` code by introducing a new [`defangio/openai-access-gateway`](https://github.com/DefangLabs/openai-access-gateway) service. We'll call the new service `llm`. This new service will act as a proxy between your application and AWS Bedrock, and will transparently handle converting your OpenAI requests into AWS Bedrock requests and Bedrock responses into OpenAI responses. This allows you to use AWS Bedrock with your existing OpenAI client SDK. ```diff + llm: + image: defangio/openai-access-gateway + x-defang-llm: true + ports: + - target: 80 + published: 80 + mode: host + environment: + - OPENAI_API_KEY + - REGION ``` - The container image is based on [aws-samples/bedrock-access-gateway](https://github.com/aws-samples/bedrock-access-gateway), with enhancements. - `x-defang-llm: true` signals to **Defang** that this service should be configured to use target platform AI services. - New environment variables: - `REGION` is the zone where the services runs (for AWS, this is the equivalent of AWS_REGION) :::tip **OpenAI Key** You no longer need your original OpenAI API Key. We recommend generating a random secret for authentication with the gateway: ```bash defang config set OPENAI_API_KEY --random ``` ::: --- Modify your `app` service to send API calls to the `openai-access-gateway`: ```diff services: app: ports: - 3000:3000 environment: OPENAI_API_KEY: + OPENAI_BASE_URL: \"http://llm/api/v1\" healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:3000/\"] ``` Now, all OpenAI traffic will be routed through your gateway service and onto AWS Bedrock. --- You should configure your application to specify the model you want to use. 
```diff services: app: ports: - 3000:3000 environment: OPENAI_API_KEY: OPENAI_BASE_URL: \"http://llm/api/v1\" + MODEL: \"anthropic.claude-3-sonnet-20240229-v1:0\" healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:3000/\"] ``` Choose the correct `MODEL` depending on which cloud provider you are using. :::info **Choosing the Right Model** - For **AWS Bedrock**, use a Bedrock model ID (e.g., `anthropic.claude-3-sonnet-20240229-v1:0`). [See available Bedrock models](https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html). ::: Alternatively, Defang supports [model mapping](/docs/concepts/managed-llms/openai-access-gateway/#model-mapping) through the [openai-access-gateway](https://github.com/DefangLabs/openai-access-gateway). This takes a model with a Docker naming convention (e.g. `ai/llama3.3`) and maps it to the closest equivalent on the target platform. If no such match can be found, a fallback can be defined to use a known existing model (e.g. `ai/mistral`). These environment variables are `USE_MODEL_MAPPING` (default to true) and `FALLBACK_MODEL` (no default), respectively. ```yaml services: app: build: context: . 
ports: - 3000:3000 environment: OPENAI_API_KEY: OPENAI_BASE_URL: \"http://llm/api/v1\" MODEL: \"anthropic.claude-3-sonnet-20240229-v1:0\" healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:3000/\"] llm: image: defangio/openai-access-gateway x-defang-llm: true ports: - target: 80 published: 80 mode: host environment: - OPENAI_API_KEY - REGION ``` --- | Variable | AWS Bedrock | |--------------------|-------------| | `REGION` | Required| | `MODEL` | Bedrock model ID or Docker model name, for example `meta.llama3-3-70b-instruct-v1:0` or `ai/llama3.3` | --- You now have a single app that can: - Talk to **AWS Bedrock** - Use the same OpenAI-compatible client code - Easily switch between models or cloud providers by changing a few environment variables", + "path": "/docs/tutorials/deploy-openai-apps/aws-bedrock" + }, + { + "id": 108, "about": "Sample Model Context Protocol Time Chatbot Application, General Overview, Architecture, Setting Up Dockerfiles, Compose File, Testing and Running on Local Machine, Deploying to the Cloud", - "text": "Using [Docker’s `mcp/time` image](https://hub.docker.com/r/mcp/time) and Anthropic Claude, we made a [chatbot application](https://github.com/DefangLabs/samples/tree/main/samples/mcp) that can access time-based resources directly on the user’s local machine and answer time-based questions. The application is containerized using Docker, enabling a convenient and easy way to get it running locally. We will later demonstrate how we deployed it to the cloud using Defang. Let’s go over the structure of the application in a local environment. ![mcp_before](/img/mcp/mcp_before.png) 1. There are two containerized services, Service 1 and Service 2, that sit on the local machine. - Service 1 contains a custom-built web server that interacts with an MCP Client. - Service 2 contains an MCP Server from Docker as a base image for the container, and a custom-built MCP Client we created for interacting with the MCP Server. 2. 
We have a browser on our local machine, which interacts with the web server in Service 1. 3. The MCP Server in Service 2 is able to access tools from either a cloud or on our local machine. This configuration is included as a part of the Docker MCP image. 4. The MCP Client in Service 2 interacts with the Anthropic API and the web server. **Service 1: Web Server** Service 1 contains a web server and the UI for a chat application (not shown in the diagram), written in Next.js. The chat UI updates based on user-entered queries and chatbot responses. A POST request is sent to Service 1 every time a user enters a query from the browser. In the web server, a Next.js server action function is used to forward the user queries to the endpoint URL of Service 2 to be processed by the MCP Client. **Service 2: MCP Service Configuration** The original Docker `mcp/time` image is not designed with the intent of being deployed to the cloud - it is created for a seamless experience with Claude Desktop. To achieve cloud deployment, an HTTP layer is needed in front of the MCP Server. To address this, we've bundled an MCP Client together with the Server into one container. The MCP Client provides the HTTP interface and communicates with the MCP Server via standard input/output ([stdio](https://modelcontextprotocol.io/docs/concepts/transports#standard-input-output-stdio)). **MCP Client** The [MCP Client](https://modelcontextprotocol.io/quickstart/client) is written in Python, and runs in a [virtual environment](https://docs.python.org/3/library/venv.html) (`/app/.venv/bin`) to accommodate specific package dependencies. The MCP Client is instantiated in a [Quart](https://quart.palletsprojects.com/en/latest/index.html) app, where it connects to the MCP Server and handles POST requests from the web server in Service 1. Additionally, the MCP Client connects to the Anthropic API to request LLM responses. 
**MCP Server and Tools (from the Docker Image)** The [MCP Server](https://github.com/modelcontextprotocol/servers/tree/main/src/time) enables access to tools from an external source, whether it be from a cloud or from the local machine. This configuration is included as a part of the [Docker MCP image](https://hub.docker.com/r/mcp/time). The tools can be accessed indirectly by the MCP Client through the MCP Server. The Docker image is used as a base image for Service 2, and the MCP Client is built in the same container as the MCP Server. Note that the MCP Server also runs in a virtual environment (`/app/.venv/bin`). **Anthropic API** The MCP Client connects to the [Anthropic API](https://docs.anthropic.com/en/api/getting-started) to request responses from a Claude model. Two requests are sent to Claude for each query. The first request will send the query contents and a list of tools available, and let Claude respond with a selection of the tools needed to craft a response. The MCP Client will then call the tools indirectly through the MCP Server. Once the tool results come back to the Client, a second request is sent to Claude with the query contents and tool results to craft the final response. **Service 1: Web Server - [Dockerfile](https://github.com/DefangLabs/samples/blob/main/samples/mcp/service-1/Dockerfile)** The base image for Service 1 is the `node:bookworm-slim` image. We construct the image by copying the server code and setting an entry point command to start the web server. **Service 2: MCP Service Configuration - [Dockerfile](https://github.com/DefangLabs/samples/blob/main/samples/mcp/service-2/Dockerfile)** The base image for Service 2 is the Docker `mcp/time` image. Since both the MCP Client and Server run in a virtual environment, we activate a `venv` command in the Dockerfile for Service 2 and create a `run.sh` shell script that runs the file containing the MCP Client and Server connection code. 
We then add the shell script as an entry point command for the container. To define Services 1 and 2 as Docker containers, we’ve written a `compose.yaml` file in the root directory, as shown below. ```yaml services: service-1: # Web Server and UI build: context: ./service-1 dockerfile: Dockerfile ports: - target: 3000 published: 3000 mode: ingress deploy: resources: reservations: memory: 256M environment: - MCP_SERVICE_URL=http://service-2:8000 healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:3000/\"] service-2: # MCP Service (MCP Client and Server) build: context: ./service-2 dockerfile: Dockerfile ports: - target: 8000 published: 8000 mode: host environment: - ANTHROPIC_API_KEY ``` Now that we’ve defined our application in Docker containers using a `compose.yaml` file, we can test and run it on our local machine by running the command: ```bash docker compose up --build ``` Once the application is started up, it can be easily tested in a local environment. However, to make it easily accessible to others online, we should deploy it to the cloud. Fortunately, deploying the application is a straightforward process using Defang, particularly since the application is [Compose-compatible](https://docs.defang.io/docs/concepts/compose). Let’s go over the structure of the application after cloud deployment. ![mcp_after](/img/mcp/mcp_after.png) Here we can see what changes if we deploy to the cloud: 1. Service 1 and Service 2 are now deployed to the cloud, not on the local machine anymore. 2. The only part on the local machine is the browser. Using the same `compose.yaml` file as shown earlier, we can deploy the containers to the cloud with the [Defang CLI](https://docs.defang.io/docs/getting-started). Once we’ve authenticated and logged in, we can [choose a cloud provider (i.e. AWS, GCP, or DigitalOcean)](https://docs.defang.io/docs/tutorials/deploy-to-your-cloud) and use our own cloud account for deployment. 
Then, we can set a configuration variable for the Anthropic API key: ```bash defang config set ANTHROPIC_API_KEY ``` Then, we can run the command: ```bash defang compose up ``` Now, the [MCP time chatbot application](https://github.com/DefangLabs/samples/tree/main/samples/mcp) will be up and running in the cloud. This means that anyone can access the application online and try it for themselves! For our case, anyone can use the chatbot to ask for the exact time or convert time zones from their machine, regardless of where they are located. ![mcp_time_chatbot](/img/mcp/mcp_time_chatbot.png) Most importantly, this chatbot application can be adapted to use any of the other Docker reference [MCP Server images](https://hub.docker.com/u/mcp), not just the `mcp/time` server. Have fun building and deploying MCP-based containerized applications to the cloud with Defang!",
    "path": "/blog/2025-02-18-model-context-protocol"
  },
  {
    "id": 108,
    "about": "Sample Model Context Protocol Time Chatbot Application, General Overview, Architecture, Setting Up Dockerfiles, Compose File, Testing and Running on Local Machine, Deploying to the Cloud",
    "text": "Using [Docker’s `mcp/time` image](https://hub.docker.com/r/mcp/time) and Anthropic Claude, we made a [chatbot application](https://github.com/DefangLabs/samples/tree/main/samples/mcp) that can access time-based resources directly on the user’s local machine and answer time-based questions. The application is containerized using Docker, enabling a convenient and easy way to get it running locally. We will later demonstrate how we deployed it to the cloud using Defang. Let’s go over the structure of the application in a local environment. ![mcp_before](/img/mcp/mcp_before.png) 1. There are two containerized services, Service 1 and Service 2, that sit on the local machine. - Service 1 contains a custom-built web server that interacts with an MCP Client. - Service 2 contains an MCP Server from Docker as a base image for the container, and a custom-built MCP Client we created for interacting with the MCP Server. 2. We have a browser on our local machine, which interacts with the web server in Service 1. 3. 
The MCP Server in Service 2 is able to access tools from either a cloud or on our local machine. This configuration is included as a part of the Docker MCP image. 4. The MCP Client in Service 2 interacts with the Anthropic API and the web server. **Service 1: Web Server** Service 1 contains a web server and the UI for a chat application (not shown in the diagram), written in Next.js. The chat UI updates based on user-entered queries and chatbot responses. A POST request is sent to Service 1 every time a user enters a query from the browser. In the web server, a Next.js server action function is used to forward the user queries to the endpoint URL of Service 2 to be processed by the MCP Client. **Service 2: MCP Service Configuration** The original Docker `mcp/time` image is not designed with the intent of being deployed to the cloud - it is created for a seamless experience with Claude Desktop. To achieve cloud deployment, an HTTP layer is needed in front of the MCP Server. To address this, we've bundled an MCP Client together with the Server into one container. The MCP Client provides the HTTP interface and communicates with the MCP Server via standard input/output ([stdio](https://modelcontextprotocol.io/docs/concepts/transports#standard-input-output-stdio)). **MCP Client** The [MCP Client](https://modelcontextprotocol.io/quickstart/client) is written in Python, and runs in a [virtual environment](https://docs.python.org/3/library/venv.html) (`/app/.venv/bin`) to accommodate specific package dependencies. The MCP Client is instantiated in a [Quart](https://quart.palletsprojects.com/en/latest/index.html) app, where it connects to the MCP Server and handles POST requests from the web server in Service 1. Additionally, the MCP Client connects to the Anthropic API to request LLM responses. 
**MCP Server and Tools (from the Docker Image)** The [MCP Server](https://github.com/modelcontextprotocol/servers/tree/main/src/time) enables access to tools from an external source, whether it be from a cloud or from the local machine. This configuration is included as a part of the [Docker MCP image](https://hub.docker.com/r/mcp/time). The tools can be accessed indirectly by the MCP Client through the MCP Server. The Docker image is used as a base image for Service 2, and the MCP Client is built in the same container as the MCP Server. Note that the MCP Server also runs in a virtual environment (`/app/.venv/bin`). **Anthropic API** The MCP Client connects to the [Anthropic API](https://docs.anthropic.com/en/api/getting-started) to request responses from a Claude model. Two requests are sent to Claude for each query. The first request will send the query contents and a list of tools available, and let Claude respond with a selection of the tools needed to craft a response. The MCP Client will then call the tools indirectly through the MCP Server. Once the tool results come back to the Client, a second request is sent to Claude with the query contents and tool results to craft the final response. **Service 1: Web Server - [Dockerfile](https://github.com/DefangLabs/samples/blob/main/samples/mcp/service-1/Dockerfile)** The base image for Service 1 is the `node:bookworm-slim` image. We construct the image by copying the server code and setting an entry point command to start the web server. **Service 2: MCP Service Configuration - [Dockerfile](https://github.com/DefangLabs/samples/blob/main/samples/mcp/service-2/Dockerfile)** The base image for Service 2 is the Docker `mcp/time` image. Since both the MCP Client and Server run in a virtual environment, we activate a `venv` command in the Dockerfile for Service 2 and create a `run.sh` shell script that runs the file containing the MCP Client and Server connection code. 
We then add the shell script as an entry point command for the container. To define Services 1 and 2 as Docker containers, we’ve written a `compose.yaml` file in the root directory, as shown below. ```yaml services: service-1: # Web Server and UI build: context: ./service-1 dockerfile: Dockerfile ports: - target: 3000 published: 3000 mode: ingress deploy: resources: reservations: memory: 256M environment: - MCP_SERVICE_URL=http://service-2:8000 healthcheck: test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:3000/\"] service-2: # MCP Service (MCP Client and Server) build: context: ./service-2 dockerfile: Dockerfile ports: - target: 8000 published: 8000 mode: host environment: - ANTHROPIC_API_KEY ``` Now that we’ve defined our application in Docker containers using a `compose.yaml` file, we can test and run it on our local machine by running the command: ```bash docker compose up --build ``` Once the application is started up, it can be easily tested in a local environment. However, to make it easily accessible to others online, we should deploy it to the cloud. Fortunately, deploying the application is a straightforward process using Defang, particularly since the application is [Compose-compatible](/docs/concepts/compose). Let’s go over the structure of the application after cloud deployment. ![mcp_after](/img/mcp/mcp_after.png) Here we can see what changes if we deploy to the cloud: 1. Service 1 and Service 2 are now deployed to the cloud, not on the local machine anymore. 2. The only part on the local machine is the browser. Using the same `compose.yaml` file as shown earlier, we can deploy the containers to the cloud with the [Defang CLI](/docs/getting-started). Once we’ve authenticated and logged in, we can [choose a cloud provider (i.e. AWS, GCP, or DigitalOcean)](/docs/tutorials/deploy-to-your-cloud) and use our own cloud account for deployment. 
Then, we can set a configuration variable for the Anthropic API key: ```bash defang config set ANTHROPIC_API_KEY ``` Then, we can run the command: ```bash defang compose up ``` Now, the [MCP time chatbot application](https://github.com/DefangLabs/samples/tree/main/samples/mcp) will be up and running in the cloud. This means that anyone can access the application online and try it for themselves! For our case, anyone can use the chatbot to ask for the exact time or convert time zones from their machine, regardless of where they are located. ![mcp_time_chatbot](/img/mcp/mcp_time_chatbot.png) Most importantly, this chatbot application can be adapted to use any of the other Docker reference [MCP Server images](https://hub.docker.com/u/mcp), not just the `mcp/time` server. Have fun building and deploying MCP-based containerized applications to the cloud with Defang!",
    "path": "/blog/2025/02/18/model-context-protocol"
  },
  {
    "id": 109,
    "about": "**Defang is live on Product Hunt!**",
    "text": "The moment is finally here – **Defang V1 is officially LIVE on Product Hunt!** 🎉 \"Defang Over the past few months, our team has been working tirelessly to create a tool that transforms how developers develop, deploy, and debug cloud apps. With Defang, you can go from idea to your favorite cloud in minutes. 🚀 Today, we have the opportunity to showcase Defang to a global audience, and your support could make all the difference! If you already have a Product Hunt account, it's super easy. * ✅ You can support our product if you like what we have built so far * ✅ You can leave a comment and any feedback you may have (comments are great!) * ✅ You can leave a review Product Hunt launches are time sensitive as they last 24 hours, so if you have 30 seconds available right now, it would really mean a lot. If you don't already have a Product Hunt account, please don't create one now to upvote (we may get penalized for that). Instead, you can like and share our (e.g. 
LinkedIn, Twitter, Instagram or Facebook) posts . Thank you in advance. Your support means the world.", - "path": "/blog/2024-12-11-product-hunt" + "path": "/blog/2024/12/11/product-hunt" }, { - "id": 68, + "id": 110, + "about": "🚀 Managed LLMs in Playground", + "text": "You can now try managed LLMs directly in the Defang Playground. Defang makes it easy to use cloud-native language models across providers — and now you can test them instantly in the Playground. - Managed LLM support - Playground-ready - Available in CLI v1.1.22 or higher To use managed language models in your own Defang services, just add `x-defang-llm: true` — Defang will configure the appropriate roles and permissions for you. Already built on the OpenAI API? No need to rewrite anything. With Defang's OpenAI Access Gateway, you can run your existing apps on Claude, DeepSeek, Mistral, and more — using the same OpenAI format. [Learn more here.](/docs/concepts/managed-llms/managed-language-models) [Try it out here.](https://github.com/DefangLabs/samples/tree/main/samples/managed-llm)", + "path": "/blog/2025/06/06/may-product-updates" + }, + { + "id": 111, + "about": "📦 MongoDB Preview on AWS", + "text": "Last month, we added support for MongoDB-compatible workloads on AWS via Amazon DocumentDB. 
Just add this to your `compose.yaml`: ```yaml services: db: x-defang-mongodb: true ``` Once you add `x-defang-mongodb: true`, Defang will auto-spin a DocumentDB cluster in your AWS — no setup needed.", + "path": "/blog/2025/06/06/may-product-updates" + }, + { + "id": 112, + "about": "🛠 MCP Server Improvements", + "text": "We've made the MCP Server and CLI easier to use and deploy: - Users are now prompted to agree to Terms of Service via the portal login - MCP Server and CLI are now containerized, enabling faster setup, smoother deployments, and better portability across environments", + "path": "/blog/2025/06/06/may-product-updates" + }, + { + "id": 113, + "about": "🌎 Events and Community", + "text": "We kicked off the month by sponsoring Vancouver's first [Vibe Coding IRL](https://www.linkedin.com/feed/update/urn:li:activity:7336086817357606914) Sprint. Jordan Stephens from Defang ran a hands-on workshop [on \"Ship AI Faster with Vertex AI\" with GDG Vancouver](https://www.linkedin.com/posts/gdgyvr_gdgvancouver-buildwithai2025-vertexai-activity-7334288539473334272-LHBl) ([GDG Vancouver](https://gdg.community.dev/gdg-vancouver/)). Around the same time, our CTO and Co-founder Lio joined the [GenAI Founders Fireside panel](https://www.linkedin.com/posts/jenniferyli_genai-founders-fireside-networking-luma-activity-7330702064706117634-D7Hl) hosted by [AInBC](https://ainbc.ai/) and AWS. Big moment for the team — we won the [Best Canadian Cloud Award](https://www.linkedin.com/feed/update/urn:li:activity:7333554571568435200) at the Vancouver Cloud Summit. Right after, we hit the expo floor at [Web Summit Vancouver](https://www.linkedin.com/posts/defanglabs_defang-websummit-websummitvancouver-activity-7333898557185302528-Rvsm) as part of the BETA startup program and got featured by FoundersBeta as one of the [Top 16 Startups to Watch](https://thefounderspress.com/web-summit-vancouver-top-companies-to-watch-in-2025/). 
Our Campus Advocates also kept the momentum going, hosting [Defang events](https://www.linkedin.com/feed/update/urn:li:activity:7334554265056636928) around the world with live demos and workshops. Last month's Defang Coffee Chat brought together the community for product updates, live demos, and a great convo on vibe deploying. We're back again on June 25 at 10 AM PST. [Save your spot here.](https://lu.ma/bamhymdq) We can't wait to see what you deploy with Defang. [Join our Discord](http://s.defang.io/discord) to ask questions, get support, and share your builds. More coming in June.", + "path": "/blog/2025/06/06/may-product-updates" + }, + { + "id": 114, "about": "**🚀 Defang V1: Launch Week is Here!**, **What’s New in Defang CLI V1?**, **Launch Week Activities**, **Join the Celebration 🎉**", - "text": "At **Defang**, we’re enabling developers go from **idea to code to deployment 10x faster**. We’re thrilled to announce that **Defang V1** is officially launching during our action-packed **Launch Week**, running from **December 4–10, 2024**! This marks a major milestone as we officially release the tools and features developers have been waiting for. **Defang is a powerful tool that lets you easily develop, deploy, and debug production-ready cloud applications**. With Defang V1, we continue to deliver on our vision to make **cloud development effortlessly simple and portable, with the ability to develop once and deploy anywhere**. Here’s what’s included in this milestone release: - **Production-Ready Support for AWS** Seamlessly deploy and scale with confidence on [AWS](https://docs.defang.io/docs/providers/aws). Defang is now [WAFR](https://aws.amazon.com/premiumsupport/business-support-well-architected/)-compliant, assuring that your deployments conform to all the best-practices for AWS deployments. Defang is now officially part of the [AWS Partner Network](https://aws.amazon.com/partners/). 
- **New - Google Cloud Platform ([GCP](https://docs.defang.io/docs/providers/gcp)) in Preview** This week, we are excited to unveil support for deployments to GCP, in Preview. Start building and exploring and give us feedback as we work to enhance the experience further and move towards production support. Defang is also now officially part of the [Google Cloud Partner Advantage](https://cloud.google.com/partners?hl=en) program. - **Support for DigitalOcean in Preview** Developers using [DigitalOcean](https://docs.defang.io/docs/providers/digitalocean) can explore our Preview features, with further enhancements and production support coming soon. **Defang Product Tiers and Introductory Pricing 🛠️** As we move into V1, we are also rolling out our differentiated product tiers, along with our special introductory pricing. Fear not, we will always have a free tier for hobbyists - conveniently called the Hobby tier. We now also provide Personal, Pro, and Enterprise tiers for customers with more advanced requirements. Check out what is included in each [here](https://defang.io/pricing/). And as always, the Defang CLI is and remains [open-source](https://github.com/DefangLabs). We’ve lined up an exciting week of activities to showcase the power of **Defang** and bring together our growing community: - **December 4: Vancouver CDW x AWS re:Invent Watch Party** Join us at the Vancouver [CDW x AWS re:Invent Watch Party](https://lu.ma/1r0zsw76), where we will have a booth showcasing Defang’s capabilities and AWS integration. Stop by to learn more about Defang and see a live demo from the Defang dev team. - **December 5–6: GFSA DemoDay and Git Push to 2025: Devs Social Party** Hear directly from Defang’s co-founder and CTO, Lio Lunesu, as we unveil Defang’s support for GCP at the Google for Startups Accelerator (GFSA) [DemoDay](https://rsvp.withgoogle.com/events/google-for-startups-accelerator-canada-demo-day) event in Toronto. 
This event will also be live-streamed [here](https://rsvp.withgoogle.com/events/google-for-startups-accelerator-canada-demo-day/forms/registration). Additionally, join us on December 5th for the final [meetup](https://lu.ma/x2lridge) of the year for Vancouver’s developer groups, hosted by VanJS in collaboration with other local dev communities. - **December 6 & 7: MLH Global Hack Week (GHW)** Join us during [MLH Global Hack Week](https://ghw.mlh.io/schedule) for hands-on workshops and learn how to build production-ready cloud applications in minutes with Defang. - **December 7: Cloud Chat** An IRL event with our team to explore V1 features in depth, answer your questions, and share insights from our journey. - **December 10: Product Hunt Launch** Be part of our Product Hunt debut and show your support as we reach the broader tech community. This launch week is not just about us. It is about **you**, our community. Here is how you can get involved: 1.\t**Explore the Platform**: Sign up at [Defang.io](https://defang.io/) and dive into V1. 2.\t**Attend Events**: Mark your calendar for our scheduled activities. 3.\t**Spread the Word**: Follow us on [LinkedIn](https://www.linkedin.com/company/defanglabs/) and [X](https://x.com/defanglabs), share your experiences, and let others know why you love Defang. We’re excited to celebrate this milestone with all of you. Stay tuned for more updates, and let’s make Launch Week unforgettable!", - "path": "/blog/2024-12-04-launch-week" + "text": "At **Defang**, we’re enabling developers go from **idea to code to deployment 10x faster**. We’re thrilled to announce that **Defang V1** is officially launching during our action-packed **Launch Week**, running from **December 4–10, 2024**! This marks a major milestone as we officially release the tools and features developers have been waiting for. **Defang is a powerful tool that lets you easily develop, deploy, and debug production-ready cloud applications**. 
With Defang V1, we continue to deliver on our vision to make **cloud development effortlessly simple and portable, with the ability to develop once and deploy anywhere**. Here’s what’s included in this milestone release: - **Production-Ready Support for AWS** Seamlessly deploy and scale with confidence on [AWS](/docs/providers/aws). Defang is now [WAFR](https://aws.amazon.com/premiumsupport/business-support-well-architected/)-compliant, assuring that your deployments conform to all the best-practices for AWS deployments. Defang is now officially part of the [AWS Partner Network](https://aws.amazon.com/partners/). - **New - Google Cloud Platform ([GCP](/docs/providers/gcp)) in Preview** This week, we are excited to unveil support for deployments to GCP, in Preview. Start building and exploring and give us feedback as we work to enhance the experience further and move towards production support. Defang is also now officially part of the [Google Cloud Partner Advantage](https://cloud.google.com/partners?hl=en) program. - **Support for DigitalOcean in Preview** Developers using [DigitalOcean](/docs/providers/digitalocean) can explore our Preview features, with further enhancements and production support coming soon. **Defang Product Tiers and Introductory Pricing 🛠️** As we move into V1, we are also rolling out our differentiated product tiers, along with our special introductory pricing. Fear not, we will always have a free tier for hobbyists - conveniently called the Hobby tier. We now also provide Personal, Pro, and Enterprise tiers for customers with more advanced requirements. Check out what is included in each [here](https://defang.io/pricing/). And as always, the Defang CLI is and remains [open-source](https://github.com/DefangLabs). 
We’ve lined up an exciting week of activities to showcase the power of **Defang** and bring together our growing community: - **December 4: Vancouver CDW x AWS re:Invent Watch Party** Join us at the Vancouver [CDW x AWS re:Invent Watch Party](https://lu.ma/1r0zsw76), where we will have a booth showcasing Defang’s capabilities and AWS integration. Stop by to learn more about Defang and see a live demo from the Defang dev team. - **December 5–6: GFSA DemoDay and Git Push to 2025: Devs Social Party** Hear directly from Defang’s co-founder and CTO, Lio Lunesu, as we unveil Defang’s support for GCP at the Google for Startups Accelerator (GFSA) [DemoDay](https://rsvp.withgoogle.com/events/google-for-startups-accelerator-canada-demo-day) event in Toronto. This event will also be live-streamed [here](https://rsvp.withgoogle.com/events/google-for-startups-accelerator-canada-demo-day/forms/registration). Additionally, join us on December 5th for the final [meetup](https://lu.ma/x2lridge) of the year for Vancouver’s developer groups, hosted by VanJS in collaboration with other local dev communities. - **December 6 & 7: MLH Global Hack Week (GHW)** Join us during [MLH Global Hack Week](https://ghw.mlh.io/schedule) for hands-on workshops and learn how to build production-ready cloud applications in minutes with Defang. - **December 7: Cloud Chat** An IRL event with our team to explore V1 features in depth, answer your questions, and share insights from our journey. - **December 10: Product Hunt Launch** Be part of our Product Hunt debut and show your support as we reach the broader tech community. This launch week is not just about us. It is about **you**, our community. Here is how you can get involved: 1.\t**Explore the Platform**: Sign up at [Defang.io](https://defang.io/) and dive into V1. 2.\t**Attend Events**: Mark your calendar for our scheduled activities. 
3.\t**Spread the Word**: Follow us on [LinkedIn](https://www.linkedin.com/company/defanglabs/) and [X](https://x.com/defanglabs), share your experiences, and let others know why you love Defang. We’re excited to celebrate this milestone with all of you. Stay tuned for more updates, and let’s make Launch Week unforgettable!", + "path": "/blog/2024/12/04/launch-week" }, { - "id": 69, + "id": 115, "about": "Introducing Defang's New Look: Website & Logo Refresh, Why the Refresh?, Here's what's new:, 1. Refining Our Messaging, 2. A Sleek New Logo, 3. A Redesigned Website, Rolling Out the Refresh, We'd Love Your Feedback!", "text": "Over the last couple of years, as we have been building Defang, we've learnt a lot about the key needs of developers in deploying their applications to the cloud - the primacy of a simple developer experience, while at the same time providing a flexible and production-ready solution that can work seamlessly with all of the popular cloud platform targets. In response, we have been constantly evolving our product functionality to address those needs in the simplest yet most powerful way we can come up with. While certainly there is a long way to go, we have definitely come a long way since we started. As we reflected on our journey, we realized our branding and messaging needed to better reflect Defang's current value-proposition. That's why today, we're excited to unveil our brand refresh, our first since the early days of Defang. As Defang evolves, so does our message: - **Our Promise**: Develop Anything, Deploy Anywhere. - **What We Enable**: Any App, Any Stack, Any Cloud. - **How It Works**: Take your app from Docker Compose to a secure, scalable deployment on your favorite cloud in minutes. We've modernized our logo while keeping the core hexagonal design. The new look symbolizes Defang's role in seamlessly deploying any cloud application to any cloud. 
We've refreshed our website with a sleek, intuitive design and a modern user experience to better showcase Defang's capabilities. Starting today, you'll see these updates across our [Defang.io](https://defang.io) homepage and social media platforms ([Twitter](https://x.com/DefangLabs), [LinkedIn](https://www.linkedin.com/company/defanglabs), [Discord](https://s.defang.io/discord), [BlueSky](https://bsky.app/profile/defanglabs.bsky.social)). In the coming days, we'll extend this refresh across all our digital assets. Check out the new look and let us know what you think! And if you haven't, please join us on Discord and follow us on social media.", - "path": "/blog/2025-03-12-website-logo-refresh-announcement" + "path": "/blog/2025/03/12/website-logo-refresh-announcement" }, { - "id": 70, + "id": 116, + "about": "Deploying a Django App with Real-time Moderation Using Defang, Clone the repo, Overview of Our Django Application, Running Locally, Application Features, Real-time Chat, Background Moderation Tasks, Django Admin, Deploying with Defang, Deploying to Defang Playground, Deploying to Your Own Cloud, Cloud Deployment Results, Why Use Defang?, Try It Yourself", + "text": "In this guide, we'll walk through the easiest and fastest way to deploy a full-featured Django application—including real-time chat and background task processing—to the cloud using Defang. You'll see firsthand how simple Defang makes it to deploy apps that require multiple services like web servers, background workers, Redis, and Postgres. Before we get started, you'll want to clone the repo with the app code, [here](https://github.com/DefangLabs/django-chat-demo). We're deploying a real-time chat application that includes automatic moderation powered by a background worker using the Natural Language Toolkit (NLTK). The application structure includes: - **Web Service**: Django app with chat functionality using Django Channels for real-time interactions. 
- **Worker Service**: Background tasks processing messages for profanity and sentiment analysis. - **Postgres Database**: Managed database instance for persistent storage. - **Redis Broker**: Managed Redis instance serving as the broker for Celery tasks and Django Channels. To run the app locally, we use Docker Compose, splitting configurations into two YAML files: - `compose.yaml`: Production configuration. - `compose.dev.yaml`: Development overrides extending production. You can quickly spin up the application locally with: ```bash docker compose --env-file .env.dev -f compose.dev.yaml up --build ``` This runs things with autoreloading so you can iterate on the Django app, all while passing environment variables in the same way as we will with Defang's [secure configuration system](/docs/concepts/configuration) and being ready to deploy to production. Using Django Channels and Redis, users can engage in real-time conversations within chat rooms. The worker service runs independently, handling moderation tasks asynchronously. It uses NLTK to: - Check for profanity. - Perform sentiment analysis. - Automatically flag negative or inappropriate messages. This decouples resource-intensive tasks from the main API server, ensuring optimal application responsiveness. The demo isn't doing anything very complicated, but you could easily run machine learning models [with access to GPUs](/docs/tutorials/deploy-with-gpu) with Defang if you needed to. The Django admin is setup to quickly visualize messages and their moderation status. Access it at `/admin` with your superuser credentials: username `admin` and password `admin` setup by default when you first run or deploy. Deploying multi-service applications to cloud providers traditionally involves complex infrastructure setup, including configuring ECS clusters, security groups, networking, and more. Defang simplifies this significantly. 
The Defang Playground lets you quickly preview your deployed app in a managed environment. **Secure Configuration** Before deploying, securely set encrypted sensitive values: ```bash defang config set DJANGO_SECRET_KEY defang config set POSTGRES_PASSWORD ``` Then run the deployment command: ```bash defang compose up ``` Defang automatically: - Builds Docker containers. - Sets up required services. - Manages networking and provisioning. Once deployed, your app is accessible via a public URL provided by Defang, which you can find in the CLI output or in our portal at [https://portal.defang.io](https://portal.defang.io) To deploy directly into your AWS account (or other [supported providers](/docs/category/providers)): 1. Set your cloud provider: > In my case, I use an AWS Profile, but you should be able to use [any methods supported by the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) ```bash export DEFANG_PROVIDER=AWS export AWS_PROFILE=your-profile-name ``` **Secure Configuration** Before deploying, securely set encrypted sensitive values in your cloud account: ```bash defang config set DJANGO_SECRET_KEY defang config set POSTGRES_PASSWORD ``` 2. Deploy: ```bash defang compose up ``` Defang handles provisioning managed services (RDS for Postgres, ElastiCache for Redis), container builds, and networking setup. Note: Initial provisioning for managed data stores might take a few minutes. Post-deployment, your Django app infrastructure includes (among other things): - **Managed Postgres**: AWS RDS instance. - **Managed Redis**: AWS ElastiCache instance. - **Containers**: ECS services with load balancers and DNS configured. Defang simplifies complex cloud deployments by: - Automatically provisioning managed cloud resources. - Securely handling sensitive configurations. - Providing seamless container orchestration without manual infrastructure setup. Explore deploying your Django applications effortlessly with Defang. 
The full source code for this example is available on [GitHub](https://github.com/DefangLabs/django-chat-demo). Feel free to give it a try, and let us know how it goes! Happy deploying!", + "path": "/blog/2025/04/10/easiest-way-to-deploy-django" + }, + { + "id": 117, "about": "Windows Experience Improvements", "text": "For our Windows users out there, we've made some changes to make your Defang experience even smoother: * You can now install Defang using `winget`, the Windows Package Manager, for a streamlined setup * We've introduced a signed binary for added security and peace of mind Deploying your apps from Windows just got a little bit nicer.", - "path": "/blog/2024-07-01-july-product-updates" + "path": "/blog/2024/07/01/july-product-updates" }, { - "id": 71, + "id": 118, "about": "One-click Deploy", "text": "We've added a new feature that will make it even easier to get started with Defang. We've created a flow where each sample provides a button that allows you to immediately deploy a template with a GitHub action which will automatically deploy the sample to the Defang Playground. That means you can easily make changes by committing them to your brand new repo, and everything automatically updates in the Playground. It's a great way to get started with Defang and start experimenting with your own projects. Try it now [from our portal](https://portal.defang.dev/sample)! ![screenshot of 1-click deploy UI in portal](/img/july-update/1-click-deploy.png)", - "path": "/blog/2024-07-01-july-product-updates" + "path": "/blog/2024/07/01/july-product-updates" }, { - "id": 72, + "id": 119, "about": "Managed Redis Updates", "text": "We first introduced this last month, but we've since rolled it out to everyone. We also added a sample that showcases the power of managed Redis: [BullMQ with Redis](https://github.com/DefangSamples/sample-bullmq-bullboard-redis-template). 
It demonstrates how you can use BullMQ and BullBoard with a managed Redis instance to create a powerful job queue system so you can easily build robust, scalable applications in AWS with Defang.", - "path": "/blog/2024-07-01-july-product-updates" + "path": "/blog/2024/07/01/july-product-updates" }, { - "id": 73, + "id": 120, "about": "Updated Samples", "text": "We've updated our sample projects to showcase how to use them with Defang, including: * [ASP.NET Core](https://github.com/DefangSamples/sample-csharp-dotnet-template) * [Feathers.js](https://github.com/DefangSamples/sample-feathersjs-template) * [Flask & LangChain](https://github.com/DefangSamples/sample-langchain-template) * [BullMQ with Redis](https://github.com/DefangSamples/sample-bullmq-bullboard-redis-template) Check them out if you're looking for some inspiration or a starting point for your own projects.", - "path": "/blog/2024-07-01-july-product-updates" + "path": "/blog/2024/07/01/july-product-updates" }, { - "id": 74, + "id": 121, "about": "CLI Updates", "text": "We're always looking for ways to enhance the CLI experience. 
Here's what's new: * `npx defang` automatically checks to always have the latest version of the CLI * The output during `defang compose up` has been streamlined to focus on the most important information * `defang tail` now supports listening to specific services, making it easier to troubleshoot issues * We've improved hints and error messages to better guide you when something goes wrong * The CLI now has improved color support for light theme terminals, making it easier on the eyes It's the small refinements that can make a big difference in your workflow.", - "path": "/blog/2024-07-01-july-product-updates" + "path": "/blog/2024/07/01/july-product-updates" }, { - "id": 75, + "id": 122, "about": "Other Updates", "text": "Here are a few more things that didn't quite fit with the rest: * Visibility into ECS deployment events in BYOC tail logs * Improvements to ACME certificate generation Keep an eye out for these updates in the near future. --- As always, we'd love your help shaping the future of Defang, so let us know what you'd like to see next. Happy deploying! 🚀", - "path": "/blog/2024-07-01-july-product-updates" + "path": "/blog/2024/07/01/july-product-updates" }, { - "id": 76, + "id": 123, "about": "Samples, samples, samples!, Start from a sample in seconds, Sample templates", "text": "We've been cranking out samples like there's no tomorrow. We've published samples to get you up and running with FastAPI, Elysia, Angular, React, Svelte, Sveltekit, Sails.js, Phoenix, and more. You can filter through them on the [Defang homepage](https://defang.io/#deploy). Check out our video about all the [new samples and functionality](https://www.youtube.com/watch?v=8wIU_af-sX8). With all this work we've been putting into samples, we realized it would be pretty awesome if you could clone a sample faster. So we updated the CLI. Now, if you run `defang generate` you'll be able to filter through the samples and choose one. 
You can also filter through the samples on the [Defang homepage](https://defang.io/#deploy) and clone any of them with a simple command like `defang new sveltekit`. If you look through our [GitHub organization](https://github.com/DefangLabs), you'll start seeing loads of repos with the structure `sample--template`. If you open them, you can create a new repo by clicking this button: ![screenshot of github UI pointing towards template button](https://github.com/DefangLabs/defang-docs/assets/910895/97d33d90-43b9-499a-b139-e114b701adcb) Not only will that create a new repo based on the sample in your account, but if you've used Defang before (and accepted the Terms and Conditions) it will automatically deploy it to the playground so you can start playing with Defang immediately.", - "path": "/blog/2024-06-01-june-product-updates" + "path": "/blog/2024/06/01/june-product-updates" }, { - "id": 77, + "id": 124, "about": "ACME for BYOD", "text": "We’re excited to announce that ACME support is now available for Bring Your Own Domain (BYOD) in both Bring Your Own Cloud (BYOC) and Playground. This means you can easily add Let's Encrypt certificates to your custom domains, regardless of where your DNS is hosted. Defang will handle the certificate generation and automatic renewal for you. Nice and easy.", - "path": "/blog/2024-06-01-june-product-updates" + "path": "/blog/2024/06/01/june-product-updates" }, { - "id": 78, + "id": 125, "about": "Warnings for Stateful Services", "text": "To help you avoid potential pitfalls, we’ve added warnings against deploying stateful services with Defang, since you shouldn't actually be deploying anything stateful with Defang. For example, we'll warn you if you try to deploy services with images like `postgres:`, `redis:`, `minio:`, etc. In the near future we will be offering ways to run some stateful services using cloud providers' managed offerings. For example Redis, Postgres, and S3. 
Speaking of which...", "path": "/blog/2024/06/01/june-product-updates" }, { "id": 126, "about": "Managed Redis!", "text": "Redis is such a versatile tool that can help with so many different use cases. So we've introduced Managed Redis! You can now specify the Redis image in your `compose.yaml` file and indicate that you want it managed by your cloud provider using `x-defang-redis: true` in your service definition.", "path": "/blog/2024/06/01/june-product-updates" }, { "id": 127, "about": "Load Testing", "text": "To make sure Defang is ready for loads of new users, we've been doing a lot of load testing. This is going to help us identify and address potential bottlenecks so we can make sure that Defang scales efficiently and handles all your new users smoothly.", "path": "/blog/2024/06/01/june-product-updates" }, { "id": 128, "about": "Upgraded Kaniko", "text": "We’ve upgraded our Kaniko integration to version 1.23.0 to improve your container build experience. The new version comes with several bug fixes that enhance stability and performance. This means faster and more reliable builds for your applications.", "path": "/blog/2024/06/01/june-product-updates" }, { "id": 129, "about": "Upcoming Features, Managed Postgres, BYOC ECS Lifecycle Events", "text": "We’re not stopping here! Here’s a sneak peek at what’s coming soon: Building on the momentum of Managed Redis, we’re introducing Managed Postgres. Soon you’ll be able to easily integrate a managed Postgres database into your deployment! Defang runs your services with ECS, and we're working on making it clearer what's happening under the hood. --- Stay tuned for more updates and improvements. 
As always, we appreciate your feedback and are committed to making Defang the easiest way to deploy your applications to the cloud. Go deploy something awesome! 🚀", - "path": "/blog/2024-06-01-june-product-updates" + "path": "/blog/2024/06/01/june-product-updates" }, { - "id": 83, + "id": 130, + "about": "MCP + Vibe Deploying", + "text": "This month we focused on making cloud deployments as easy as writing a prompt. Our latest Vibe Deploying [blog](/blog?_gl=1*1wbnk1m*_ga*MTM4MTE5ODI4NC4xNzQ0MDU0NDMz*_ga_QVJVWN44CW*czE3NDY3NDQwNDckbzQ3JGcxJHQxNzQ2NzQ0NzUzJGowJGwwJGgw) shows how you can launch full-stack apps right from your IDE just by chatting. Whether you're working in **Cursor**, **Windsurf**, **VS Code**, or **Claude**, Defang's **MCP** integration lets you deploy to the cloud just as easily as conversing with the AI to generate your app. For more details, check out the docs for the [Defang Model Context Protocol Server](/docs/concepts/mcp) – it explains how it works, how to use it, and why it's a game changer for deploying to the cloud. You can also watch our tutorials for [Cursor](https://youtu.be/Aicqp9QQdwk?si=h891XNIN_f_U2-fX), [Windsurf](https://youtu.be/QU4q1eULs1I?si=nmSrXLqHtHRRlCVb), and [VS Code](https://youtu.be/k8NbeKKzqiI?si=BxDNJOw_zHnL433w).", + "path": "/blog/2025/05/09/april-product-updates" + }, + { + "id": 131, + "about": "Managed LLMs", + "text": "Last month we shipped the [`x-defang-llm` compose service extension](/docs/concepts/managed-llms/managed-language-models) to easily deploy inferencing apps that use managed LLM services such as AWS Bedrock. 
This month, we're excited to announce the same support for GCP Vertex AI – give it a try and let us know your feedback!", + "path": "/blog/2025/05/09/april-product-updates" + }, + { + "id": 132, + "about": "Events and Programs", + "text": "On [**April 28**](https://www.linkedin.com/posts/defanglabs_defang-vibecoding-vancouverdev-activity-7323769706681655296-48OA?utm_source=share&utm_medium=member_desktop&rcm=ACoAACNoYXgBadWv4CWLbcKhgSGxWjdmu9e5dFI), we kicked things off with an epic night of demos, dev energy, and cloud magic at RAG & AI in Action. Our own **Kevin Vo** showed how fast and easy it is to deploy AI apps from Windsurf to the cloud using just the **Defang MCP**. The crowd got a front-row look at how **Vibe Deploying** turns cloud infra into a background detail. We finished the month with our signature Defang Coffee Chat, a casual hangout with product updates, live Q&A, and great conversations with our community. Our Campus Advocates also hosted [workshops](https://www.linkedin.com/feed/update/urn:li:activity:7319584997319802880/) around the world, bringing Defang to new students and builders. We wrapped up the month with our latest Defang Coffee Chat, featuring live demos, product updates, and a solid conversation around vibe deploying. Thanks to everyone who joined. The next one is on May 21 at 10 AM PST. Save your spot [here](https://lu.ma/defang-may).", + "path": "/blog/2025/05/09/april-product-updates" + }, + { + "id": 133, + "about": "Looking Ahead", + "text": "Here's what's coming in May: - Web Summit Vancouver – Defang will be a [startup sponsor](https://vancouver.websummit.com/appearances/van25/c2ccbb40-9d0f-4fe4-8808-d02de1a15534/defang/), please come see us on the expo floor. - More MCP tutorials and dev tools. Let's keep building. 🚀", + "path": "/blog/2025/05/09/april-product-updates" + }, + { + "id": 134, "about": "npx defang", "text": "We know a lot of you are using Defang for the first time. 
To make it easier to get started, we've added a new way to install the Defang CLI. Now you can use npx to run the CLI without installing it globally by running: ```bash npx defang@latest ``` This will download the latest version of the Defang CLI and run it for you. No need to worry about installing or updating the CLI manually!", - "path": "/blog/2024-05-01-may-product-updates" + "path": "/blog/2024/05/01/may-product-updates" }, { - "id": 84, + "id": 135, "about": "(coming soon) Bring Your Own Domain with Let's Encrypt", "text": "Previously you could bring your own domain with Defang BYOC... but you had to host the DNS records with AWS Route 53 in the AWS account you were deploying to. Now you can point domains hosted anywhere to your Defang deployment and we'll take care of the rest. We'll help generate a Let's Encrypt certificate for your domain and automatically renew it for you.", - "path": "/blog/2024-05-01-may-product-updates" + "path": "/blog/2024/05/01/may-product-updates" }, { - "id": 85, + "id": 136, "about": "Windows Support", "text": "Some of you use Defang from a Windows PC and previously a few features didn't work correctly on Windows. Some stuff we've fixed: * ansi color codes in logs * handle ctrl-c when tailing logs", - "path": "/blog/2024-05-01-may-product-updates" + "path": "/blog/2024/05/01/may-product-updates" }, { - "id": 86, + "id": 137, "about": "Improved CLI", "text": "We've made a variety of small tweaks and improvements to the CLI which should make things a little bit cleaner and more stable. 
* log messages have been made more clear * adding more progress information during compose up", "path": "/blog/2024/05/01/may-product-updates" }, { "id": 138, "about": "Stability and Reliability", "text": "Defang is still in Beta and we know we've got to be rock solid by the time we release V1, so we've been working hard to improve the stability and reliability of the Defang architecture. We've been battle-testing different technologies to see how they hold up and have made a few changes that should make things even better: * capacity improvements in build queues * improvements in log availability", "path": "/blog/2024/05/01/may-product-updates" }, { "id": 139, "about": "From S3 + CloudFront to Dynamic, Containerized Deployments", "text": "Our original site was a Next.js app using [static exports](https://nextjs.org/docs/pages/building-your-application/deploying/static-exports) deployed via S3 and fronted by CloudFront. That setup worked for a while—it was fast and simple. But with our brand refresh, we added pages and components where it made sense to use (and test for other developers) some Next.js features that we couldn't use with the static export: - [React Server Components](https://nextjs.org/docs/app/building-your-application/rendering/server-components) - [Server Actions](https://nextjs.org/docs/app/building-your-application/data-fetching/server-actions-and-mutations) - [ISR](https://nextjs.org/docs/app/building-your-application/data-fetching/incremental-static-regeneration) That meant static hosting wouldn't cut it. So we decided to run the site as an app in a container. That being said, our learnings from the previous setup *are* being used to develop the capabilities of Defang. We're using the experience to make sure that Defang can handle the deployment of static sites as well as dynamic ones. 
We'll keep you updated when that's ready. ---", - "path": "/blog/2025-03-26-deploying-defang-with-defang-part-2" + "path": "/blog/2025/03/26/deploying-defang-with-defang-part-2" }, { - "id": 89, + "id": 140, "about": "Deploying with Defang (and Why It Was Easy)", - "text": "We already deploy our other services with Defang using Compose files. In fact, the static website actually already used a Dockerfile and Compose file to manage the build process. So we just had to make some minor changes to the Compose file to take into account new environment variables for features we're adding and make a few small changes to the Dockerfile to handle the new build process. Some things we had to change: **Adding [ports](https://docs.defang.io/docs/concepts/compose#ports) to the Compose file**: ```yaml ports: - mode: ingress target: 3000 published: 3000 ``` **Adding [domain](https://docs.defang.io/docs/concepts/domains) info the Composer file**: ```yaml domainname: defang.io networks: public: aliases: - www.defang.io ``` One other hiccup was that we used to do www to non-www redirects using S3. There are a few ways to switch that up, but for the time being we decided to use Next.js middleware. Pretty soon after that, the site was up and running in an AWS account—with TLS, DNS, and both the `www` and root domains automatically configured. Pretty straightfoward! ---", - "path": "/blog/2025-03-26-deploying-defang-with-defang-part-2" + "text": "We already deploy our other services with Defang using Compose files. In fact, the static website actually already used a Dockerfile and Compose file to manage the build process. So we just had to make some minor changes to the Compose file to take into account new environment variables for features we're adding and make a few small changes to the Dockerfile to handle the new build process. 
Some things we had to change: **Adding [ports](/docs/concepts/compose#ports) to the Compose file**: ```yaml ports: - mode: ingress target: 3000 published: 3000 ``` **Adding [domain](/docs/concepts/domains) info to the Compose file**: ```yaml domainname: defang.io networks: default: aliases: - www.defang.io ``` One other hiccup was that we used to do www to non-www redirects using S3. There are a few ways to switch that up, but for the time being we decided to use Next.js middleware. Pretty soon after that, the site was up and running in an AWS account—with TLS, DNS, and both the `www` and root domains automatically configured. Pretty straightforward! ---", "path": "/blog/2025/03/26/deploying-defang-with-defang-part-2" }, { "id": 141, "about": "Real-World Lessons That Are Shaping Defang, 1. Static Assets Still Need CDNs, 2. Next.js Env Vars Can Be Tricky in Containers, 3. Redirects and Rewrites", "text": "Deploying the website wasn't just a checkbox—it helped surface real-world pain points and ideas for improvement. Even though the site is dynamic now, we still want assets like `/_next/static` to load quickly from a CDN. This made it clear that CDN support—like CloudFront integration—should be easier to configure in Defang. That’s now on our roadmap. That's also going to be useful for other frameworks that use similar asset paths, like Django. Next.js splits env vars between build-time and runtime, and the rules aren’t always obvious. Some need to be passed as build args, and others as runtime envs. That made us think harder about how Defang could help clarify or streamline this for developers—even if we can’t change that aspect of Next.js itself. We had to add a middleware to handle www to non-www redirects. This is a common need, so we're keeping an eye on how we can make this easier to deal with in Defang projects. These are the kinds of things we only notice by using Defang on real-world projects. 
---", - "path": "/blog/2025-03-26-deploying-defang-with-defang-part-2" + "path": "/blog/2025/03/26/deploying-defang-with-defang-part-2" }, { - "id": 91, + "id": 142, "about": "The Takeaway", "text": "Our site now runs like the rest of our infrastructure: - Fully containerized - Deployed to our own AWS account - Managed with a Compose file - Deployed with Defang Stay tuned for the next post in the series—because this is just one piece of the puzzle.", - "path": "/blog/2025-03-26-deploying-defang-with-defang-part-2" + "path": "/blog/2025/03/26/deploying-defang-with-defang-part-2" }, { - "id": 92, + "id": 143, + "about": "Why Build a Starter Kit for RAG + Agents?", + "text": "Let’s be honest: every developer who’s played with LLMs gets that rush of “wow” from the first working demo. But the real headaches show up when you need to stitch LLMs into something production-grade: an app that can pull in real data, coordinate multi-step logic, and more. Suddenly, you’re not just writing single prompts. You’re coordinating between multiple prompts, managing queues, adding vector databases, orchestrating workers, and trying to get things back to the user in real-time. We've found that [CrewAI](https://www.crewai.com/) (coordinating prompts, agents, tools) + [Django](https://www.djangoproject.com/) (building an api, managing data), with a bit of [Celery](https://docs.celeryproject.org/en/stable/) (orchestrating workers/async tasks), is a really nice set of tools for this. We're also going to use [Django Channels](https://channels.readthedocs.io/en/stable/) (real-time updates) to push updates back to the user. And of course, we'll use [Defang](https://www.defang.io/) to deploy all that to the cloud. If this sounds familiar (or if you're dreading the prospect of dealing with it), you’re the target audience for this sample. 
Instead of slogging through weeks of configuration and permissions hell, you get a ready-made template that runs on your laptop, then scales—unchanged—to Defang’s Playground, and finally to your own AWS or GCP account. All the gnarly infra is abstracted, so you can focus on getting as much value as possible out of that magical combo of CrewAI and Django. :::info[Just want the sample?] You can [find it here](https://github.com/DefangSamples/sample-crew-django-redis-postgres-template). :::", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 144, + "about": "A Demo in 60 Seconds", + "text": "Imagine you're building a system. It might use multiple LLM calls. It might do complex, branching logic in its prompts. It might need to store embeddings to retrieve things in the future, either to pull them into a prompt, or to return them outright. It might need to store other records that don't have embeddings. Here's a very lightweight version of a system like that, as a starting point: ", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 145, + "about": "Architecture at a Glance", + "text": "Behind the scenes, the workflow is clean and powerful. The browser connects via [WebSockets to our app using Django Channels](https://channels.readthedocs.io/en/latest/deploying.html#http-and-websocket). Heavy work is pushed to a [Celery worker](https://docs.celeryq.dev/en/stable/). That worker generates an [embedding](https://en.wikipedia.org/wiki/Embedding_(machine_learning)), checks [Postgres](https://www.postgresql.org/) with [pgvector](https://github.com/pgvector/pgvector) for a match, and either returns the summary or, if there’s no hit, fires up a [CrewAI agent](https://www.crewai.com/) to generate one. Every update streams back through [Redis](https://redis.io/) and Django Channels so users get progress in real time. \"Architecture\" Durable state lives in Postgres and Redis. 
Model services ([LLMs](https://en.wikipedia.org/wiki/LLM) and embeddings) are fully swappable, so you can upgrade to different models in the cloud or localize with the [Docker Model Runner](https://docs.docker.com/compose/how-tos/model-runner/) without rewriting the full stack.", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 146, + "about": "Under the Hood: The Services, Django + Channels, PostgreSQL + pgvector, Redis, Celery Worker, LLM and Embedding Services, CrewAI Workflows", + "text": "The Django app is the front door, routing HTTP and WebSocket traffic, serving up the admin, and delivering static content. It’s built on [Daphne](https://github.com/django/daphne) and Django Channels, with Redis as the channel layer for real-time group events. Django’s admin is your friend here: to start you can check what summaries exist, but if you start building out your own app, it'll make it a breeze to debug and manage your system. This is where your data lives. Summaries and their 1024-dimension embeddings go here. A simple SQL query checks for close matches by cosine distance, and pgvector’s index keeps search blazing fast. In BYOC (bring-your-own-cloud) mode, flip a single flag and Defang provisions you a production-grade RDS instance. Redis is doing triple duty: as the message broker and result backend for Celery, and as the channel layer for real-time WebSocket updates. The pub/sub system lets a single worker update all browser tabs listening to the same group. And if you want to scale up, swap a flag and Defang will run managed ElastiCache in production. No code change required. The Celery worker is where the magic happens. It takes requests off the queue, generates embeddings, checks for similar summaries, and—if necessary—invokes a CrewAI agent to get a new summary. It then persists summaries and pushes progress updates back to the user. 
Thanks to Docker Model Runner, the LLM and embedding services run as containerized, OpenAI-compatible HTTP endpoints. Want to switch to a different model? Change a single line in your compose file. Environment variables like `LLM_URL` and `EMBEDDING_MODEL` are injected for you—no secret sharing or hard-coding required. With CrewAI, your agent logic is declarative and pluggable. This sample keeps it simple—a single summarization agent—but you can add classification, tool-calling, or chain-of-thought logic without rewriting your task runner.", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 147, + "about": "How the Compose Files Work", + "text": "In local dev, your `compose.local.yaml` spins up [Gemma](https://hub.docker.com/r/ai/gemma3) and [Mixedbread](https://hub.docker.com/r/ai/mxbai-embed-large) models, running fully locally and with no cloud credentials or API keys required. URLs for service-to-service communication are injected at runtime. When you’re ready to deploy, swap in the main `compose.yaml` which adds Defang’s `x-defang-llm`, `x-defang-redis`, and `x-defang-postgres` flags. Now, Defang maps your Compose intent to real infrastructure—managed model endpoints, Redis, and Postgres—on cloud providers like AWS or GCP. It handles all networking, secrets, and service discovery for you. There’s no YAML rewriting or “dev vs prod” drift.", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 148, + "about": "The Three-Step Deployment Journey", + "text": "You can run everything on your laptop with a single `docker compose -f ./compose.local.yaml up` command—no cloud dependencies, fast iteration, and no risk of cloud charges. When you’re ready for the next step, use `defang compose up` to push to the Defang Playground. This free hosted sandbox is perfect for trying Defang, demos, or prototyping. It automatically adds TLS to your endpoints and sleeps after a week. For production, use your own AWS or GCP account. 
`DEFANG_PROVIDER=aws defang compose up` maps each service to a managed equivalent (ECS, RDS, ElastiCache, Bedrock models), wires up secrets, networking, etc. Your infra. Your data.", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 149, + "about": "Some Best Practices and Design Choices", + "text": "This sample uses vector similarity to try and fetch summaries that are semantically similar to the input. For more robust results, you might want to embed the original input. You can also think about chunking up longer content for finer-grained matches that you can integrate in your CrewAI workflows. Real-time progress via Django Channels beats HTTP polling, especially for LLM tasks that can take a while. The app service is stateless, which means you can scale it horizontally just by adding more containers which is easy to specify in your compose file.", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 150, + "about": "Going Further: Extending the Sample", + "text": "You’re not limited to a single summarization agent. CrewAI makes it trivial to add multi-agent flows (classification, tool use, knowledge retrieval). For big docs, chunk-level embeddings allow granular retrieval. You can wire in tool-calling to connect with external APIs or databases. You can integrate more deeply with Django's ORM and the PGVector tooling that we demo'd in the sample to build more complex agents that actually use RAG.", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 151, + "about": "Ready to Build?", + "text": "With this sample, you’ve got an agent-ready, RAG-ready backend that runs anywhere, with no stacks of YAML or vendor lock-in. Fork it, extend it, productionize it: scale up, add more agents, or swap in different models, or more models! 
Quickstart: ```shell", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 152, + "about": "Local", + "text": "docker compose -f compose.local.yaml up --build", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 153, + "about": "Playground", + "text": "defang compose up", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 154, + "about": "Setup credentials and then swap with aws or gcp", + "text": "DEFANG_PROVIDER= defang compose up ``` Want more? File an [issue](https://github.com/DefangLabs/samples/issues) to request a sample—we'll do everything we can to help you deploy better and faster!", + "path": "/blog/2025/06/16/crew-ai-sample" + }, + { + "id": 155, "about": "September Events", "text": "In Sep, the Defang team participated in a number of events including HacktheNorth, LangaraHacks, MLH Global Hack Week and other MLH hackathons. It’s great to see Defang being put to use by these hackers to build their amazing projects. And we get useful insights from these events that help us improve the product. We will continue to do more of these events in the future - watch our [LinkedIn](https://www.linkedin.com/company/defanglabs) page for announcements. ---", - "path": "/blog/2024-09-30-september-product-updates" + "path": "/blog/2024/09/30/september-product-updates" }, { - "id": 93, + "id": 156, "about": "Google for Startups Accelerator Canada", "text": "Also in Sep, Defang was included in the [Google for Startups Accelerator Canada.](https://www.linkedin.com/posts/irankarimian_ai-startups-dominate-latest-google-for-startups-activity-7241805161411551232-thky?utm_source=share&utm_medium=member_desktop) This is a great recognition of the value Defang is providing to cloud developers. Through our collaboration with Google, we hope to add GCP as another target cloud for Defang in the coming months - stay tuned! 
--- \"CoffeeChat\" --- As always, we appreciate your feedback and are committed to making Defang the easiest way to develop, deploy, and debug your cloud applications. Go build something awesome! 🚀", - "path": "/blog/2024-09-30-september-product-updates" + "path": "/blog/2024/09/30/september-product-updates" }, { - "id": 94, + "id": 157, "about": "Codespaces", "text": "If you want to get started with Codespaces, you can just fork [this repo](https://github.com/DefangLabs/github-codespace). It comes with the Defang CLI pre-installed so you can start playing with it immediately. It's set up using the Defang CLI [Dev Container Feature](https://github.com/DefangLabs/devcontainer-feature/pkgs/container/devcontainer-feature%2Fdefang-cli).", - "path": "/blog/2024-03-20-dev-environments" + "path": "/blog/2024/03/20/dev-environments" }, { - "id": 95, + "id": 158, "about": "Dev Containers", "text": "[Dev Containers](https://code.visualstudio.com/docs/devcontainers/containers) are a cool way to provision and configure development environments and share that configuration with other people you are working with. It's a spec that is supported by [VS Code](https://code.visualstudio.com/docs/devcontainers/containers), [Visual Studio](https://devblogs.microsoft.com/cppblog/dev-containers-for-c-in-visual-studio/), the JetBrains IDEs like [IntelliJ](https://www.jetbrains.com/help/idea/connect-to-devcontainer.html) and [PyCharm](https://www.jetbrains.com/help/pycharm/connect-to-devcontainer.html), [DevPod](https://devpod.sh/docs/developing-in-workspaces/devcontainer-json) and others.", - "path": "/blog/2024-03-20-dev-environments" + "path": "/blog/2024/03/20/dev-environments" }, { - "id": 96, + "id": 159, "about": "Gitpod", "text": "Gitpod is another Cloud IDE, a bit like GitHub Codespaces. 
If prefer Gitpod, you can try Defang in a Gitpod environment by using [this repo](https://github.com/DefangLabs/gitpod-workspace/tree/main).", - "path": "/blog/2024-03-20-dev-environments" + "path": "/blog/2024/03/20/dev-environments" }, { - "id": 97, + "id": 160, + "about": "Events and Programs", + "text": "Cloud, NoDevOps, BYOC, Postgres, GCP, DigitalOcean, AWS, CLI, AI, LLMs, Autoscaling, ] author: Defang Team draft: false --- ![Defang Compose Update](/img/defang-compose-update.webp) Wow - another month has gone by, time flies when you're having fun! Let us share some important updates regarding what we achieved at Defang in March: **Managed LLMs:** One of the coolest features we have released in a bit is [support for Managed LLMs (such as AWS Bedrock) through the `x-defang-llm` compose service extension](/docs/concepts/managed-llms/managed-language-models). When coupled with the `defang/openai-access-gateway` service image, Defang offers the easiest way to [migrate your OpenAI-compatible application to cloud-native managed LLMs](/docs/tutorials/deploy-openai-apps) without making any changes to your code. Support for GCP and DigitalOcean coming soon. **Defang Pulumi Provider:** Last month, we announced a preview of the [Defang Pulumi Provider](https://github.com/DefangLabs/pulumi-defang), and this month we are excited to announce that V1 is now available in the [Pulumi Registry](https://www.pulumi.com/registry/packages/defang/). As much as we love Docker, we realize there are many real-world apps that have components that (currently) cannot be described completely in a Compose file. With the Defang Pulumi Provider, you can now leverage [the declarative simplicity of Defang with the imperative power of Pulumi](/docs/concepts/pulumi#when-to-use-the-defang-pulumi-provider). **Production-readiness:** As we onboard more customers, we are fixing many fit-n-finish items: 1. 
**Autoscaling:** Production apps need the ability to easily scale up and down with load, and so we've added support for autoscaling. By adding the `x-defang-autoscaling: true` extension to your service definition in Compose.yaml file, you can benefit from automatic scale out to handle large loads and scale in when load is low. Learn more [here](/docs/tutorials/scaling-your-services). 2. **New** [CLI](https://github.com/DefangLabs/defang/releases)**:** We've been busy making the CLI more powerful, secure, and intelligent. • Smarter Config Handling: The new --random flag simplifies setup by generating secure, random config values, removing the need for manual secret creation. Separately, automatic detection of sensitive data in Compose files helps prevent accidental leaks by warning you before they are deployed. Together, these features improve security and streamline your workflow. • Time-Bound Log Tailing: Need to investigate a specific window? Use tail --until to view logs up to a chosen time—no more scrolling endlessly. Save time from sifting through irrelevant events and focus your investigation. • Automatic generation of a .dockerignore file for projects that don't already have one, saving you time and reducing image bloat. By excluding common unnecessary files—like .git, node_modules, or local configs—it helps keep your builds clean, fast, and secure right from the start, without needing manual setup. 3. **Networking / Reduce costs:** We have implemented private networks, as mentioned in the official Compose specification. We have also reduced costs by eliminating the need for a pricy NAT Gateway in \"development mode\" deployments! In March, we had an incredible evening at the AWS Gen AI Loft in San Francisco! Our CTO and Co-founder [Lionello Lunesu](https://www.linkedin.com/in/lionello/) demoed how Defang makes deploying secure, scalable, production-ready containerized applications on AWS effortless. 
Check out the demo [here](https://youtu.be/C0MbB0-6mUk?si=6dEf3gIOmWe7pctK&t=1426)! We also kicked off the [**Defang Campus Advocate Program**](https://defang.io/cap/), bringing together advocates from around the world. After launching the program in February, it was amazing to see the energy and momentum already building on campuses world-wide. Just as one example, check out [this post](https://www.linkedin.com/posts/akash-nath29_defang-backends-defang-activity-7314917342143029250-2_v8?utm_source=share&utm_medium=member_desktop&rcm=ACoAAAAQqiEBLsVLYYAzEmBFB9oIl31nQ7kDII0) from one of the students who attended a session hosted by our Campus Advocate [**Swapnendu Banerjee**](https://www.linkedin.com/in/swapnendu-banerjee-36ba06219/) and then went on to deploy his project with Defang. This is what we live for! We wrapped up the month with our monthly Coffee Chat, featuring the latest Defang updates, live demos, and a conversation on vibe coding. Thanks to everyone who joined. The next one is on April 30. **Save your spot [here](https://lu.ma/r08oz3rl).** As always, we appreciate your feedback and are committed to making Defang even better. Deploy any app to any cloud with a single command. Go build something awesome!", + "path": "/blog/2025/04/11/mar-product-updates" + }, + { + "id": 161, "about": "My Story of Embedded Systems With Defang", "text": "Have you ever looked at a touch screen fridge and wondered how it works? Back in my day (not very long ago), a fridge was just a fridge. No fancy built-in interface, no images displayed, and no wifi. But times have changed, and I’ve learned a lot about embedded systems, thanks to Defang! ![smart_fridge](/img/hardware-story/smart_fridge.png) From my background, I was more into the web development and software side of things. Buffer flushing? Serial monitors? ESP32-S3? These were unheard of. 
Then one day at Defang, I was suggested to work on a project with a [SenseCAP Indicator](https://wiki.seeedstudio.com/Sensor/SenseCAP/SenseCAP_Indicator/Get_started_with_SenseCAP_Indicator/), a small programmable touch screen device. Everyone wished me good luck when I started. That’s how I knew it wasn’t going to be an easy ride. But here I am, and I’m glad I did it. What is embedded systems/programming? It’s combining hardware with software to perform a function, such as interacting with the physical world or accessing cloud services. A common starting point for beginners is an Arduino board, which is what the SenseCAP Indicator has for its hardware. My goal was to make a UI display for this device, and then send its input to a computer, and get that data into the cloud. ![hand_typing](/img/hardware-story/hand_typing.png)", - "path": "/blog/2024-11-12-hard-lessons-from-hardware" + "path": "/blog/2024/11/12/hard-lessons-from-hardware" }, { - "id": 98, + "id": 162, "about": "The Beginning", "text": "My journey kicked off with installing the [Arduino IDE](https://www.arduino.cc/en/software) on my computer. It took me two hours—far longer than I expected—because the software versions I kept trying were not the right ones. Little did I know that I would encounter this issue many times later, such as when downloading [ESP-IDF](https://docs.espressif.com/projects/esp-idf/en/stable/esp32/get-started/index.html), a tool for firmware flashing. Figuring out what not to install had become a highly coveted skill. The next part was writing software to display images and text. This was slightly less of a problem thanks to forums of users who had done the exact same thing several years ago. One tool I used was [Squareline Studio](https://squareline.io/), a UX/UI design tool for embedded devices. With a bit of trial and error, I got a simple static program displayed onto the device. Not half bad looking either. 
Here’s what it looked like: ![ui_static](/img/hardware-story/ui_static.png)", - "path": "/blog/2024-11-12-hard-lessons-from-hardware" + "path": "/blog/2024/11/12/hard-lessons-from-hardware" }, { - "id": 99, + "id": 163, "about": "The Middle", "text": "Now came the networking part. Over wifi, I set up a Flask (Python) server on my computer to receive network pings from the SenseCAP Indicator. I used a library called [ArduinoHTTPClient](https://github.com/arduino-libraries/ArduinoHttpClient). At first, I wanted to ping the server each time a user touched the screen. Then came driver problems, platform incompatibilities, deprecated libraries… … After weeks of limited progress due to resurfacing issues, I decided to adjust my goal to send pings on a schedule of every 5 seconds, rather than relying on user input. I changed the UI to be more colorful, and for good reason. Now, each network ping appears with a message on the screen. Can you look closely to see what it says? ![ui_wifi](/img/hardware-story/ui_wifi.png) This is what the Flask server looked like on my computer as it got pinged: ![local_server](/img/hardware-story/local_server.png) Hooray! Once everything was working, It was time to deploy my Flask code as a cloud service so I could access it from any computer, not just my own. Deployment usually takes several hours due to configuring a ton of cloud provider settings. But I ain’t got time for that. Instead, I used Defang to deploy it within minutes, which took care of all that for me. Saved me a lot of time and tears. 
Here’s the Flask deployment on Defang’s Portal view: ![portal_view](/img/hardware-story/portal_view.png) Here’s the Flask server on the cloud, accessed with a deployment link: ![deployed_server](/img/hardware-story/deployed_server.png)", - "path": "/blog/2024-11-12-hard-lessons-from-hardware" + "path": "/blog/2024/11/12/hard-lessons-from-hardware" }, { - "id": 100, + "id": 164, "about": "The End", "text": "After two whole months, I finally completed my journey from [start](https://github.com/commit111/defang-arduino-static) to [finish](https://github.com/commit111/defang-arduino-wifi)! This project was an insightful dive into the world of embedded systems, internet networking, and cloud deployment. Before I let you go, here are the hard lessons from hardware, from yours truly: 1. Learning what not to do can be equally as important. 2. Some problems are not as unique as you think. 3. One way to achieve a goal is by modifying it. 4. Choose the simpler way if it is offered. 5. That’s where Defang comes in. Want to try deploying to the cloud yourself? You can try it out [here](https://defang.io/samples). Keep on composing up! 💪", - "path": "/blog/2024-11-12-hard-lessons-from-hardware" + "path": "/blog/2024/11/12/hard-lessons-from-hardware" }, { - "id": 101, + "id": 165, "about": "**The Initial Setup: A More Complex Deployment**", "text": "The Portal isn’t a simple static website; it’s a **full-stack application** with the following services: - **Next.js frontend** – Including server components and server actions. - **Hasura (GraphQL API)** – Serves as a GraphQL layer. - **Hono (TypeScript API)** – Lightweight API for custom business logic. - **OpenAuth (Authentication Service)** – Manages authentication flows. - **Redis** – Used for caching and session storage. - **Postgres** – The main database. Initially, we provisioned databases and some DNS configurations using Infra-as-Code because Defang couldn’t yet manage them for us. 
We also deployed the services themselves manually through infrastructure-as-code, requiring us to define each service separately. This worked, but seemed unnecessarily complex, if we had the right tooling… ---", - "path": "/blog/2025-03-14-deploying-defang-with-defang-part-1" + "path": "/blog/2025/03/14/deploying-defang-with-defang-part-1" }, { - "id": 102, + "id": 166, "about": "**The Transition: Expanding Defang to Reduce Complexity**", "text": "We’ve made it a priority to expand Defang’s capabilities a lot over the last year so it could take on more of the heavy lifting of a more complex application. Over the past year, we’ve added loads of features to handle things like: - **Provisioning databases**, including managing passwords and other secrets securely - **Config interpolation using values stored in AWS SSM**, ensuring the same Compose file works both locally and in the cloud - **Provisioning certs and managing DNS records** from configuration in the Compose file. As a result, we reached a point where we no longer needed custom infrastructure definitions for most of our deployment.", - "path": "/blog/2025-03-14-deploying-defang-with-defang-part-1" + "path": "/blog/2025/03/14/deploying-defang-with-defang-part-1" }, { - "id": 103, + "id": 167, "about": "**What Changed?**", "text": "- **Previously**: GitHub Actions ran infra-as-code scripts to provision databases, manage DNS, and define services *separately from the Docker Compose file we used for local dev* - **Now**: Our [**Defang GitHub Action**](https://github.com/marketplace/actions/defang-deployment-action) targets normal Compose files and deploys everything, using secrets and variables managed in GitHub Actions environments. - **Result**: We **eliminated hundreds of lines of Infra-as-Code**, making our deployment leaner and easier to manage and reducing the differences between running the Portal locally and running it in the cloud. 
This wasn’t just about reducing complexity—it was also a validation exercise. We knew that Defang had evolved enough to take over much of our deployment, but by going through the transition process ourselves, we could identify and close the remaining gaps and make sure our users could really make use of Defang for complex production-ready apps. ---", - "path": "/blog/2025-03-14-deploying-defang-with-defang-part-1" + "path": "/blog/2025/03/14/deploying-defang-with-defang-part-1" }, { - "id": 104, + "id": 168, "about": "**How Deployment Works Today**, **Config & Secrets Management**, Deployment Modes, **DNS & Certs**, **CI/CD Integration**", - "text": "- **Sensitive configuration values** (database credentials, API keys) are stored **securely in AWS SSM** using Defang’s [configuration management tooling](https://docs.defang.io/docs/concepts/configuration). - [**Environment variable interpolation**](https://docs.defang.io/docs/concepts/configuration#interpolation) allows these **SSM-stored config values** to be referenced directly in the Compose file, ensuring the same configuration works in local and cloud environments. - **Defang provisions managed Postgres and Redis instances automatically** when using the `x-defang-postgres` and `x-defang-redis` extensions, securely injecting credentials where needed with variable interpolation. - [**Deployment modes**](https://docs.defang.io/docs/concepts/deployment-modes) (`development`, `staging`, `production`) adjust infrastructure settings across our dev/staging/prod deployments making sure dev is low cost, and production is secure and resilient. - When we first set up the portal (before we even released the private beta) DNS and certs had to be managed outside the Defang context. Now, **[we can provision certs using ACM or Let’s Encrypt](https://docs.defang.io/docs/concepts/domains)**. - **Previously**: GitHub Actions ran custom infra-as-code scripts. 
- **Now**: The [**Defang GitHub Action**](https://github.com/DefangLabs/defang-github-action) installs Defang automatically and runs `defang compose up`, simplifying deployment. - **Result**: A streamlined, repeatable CI/CD pipeline. ---", - "path": "/blog/2025-03-14-deploying-defang-with-defang-part-1" + "text": "- **Sensitive configuration values** (database credentials, API keys) are stored **securely in AWS SSM** using Defang’s [configuration management tooling](/docs/concepts/configuration). - [**Environment variable interpolation**](/docs/concepts/configuration#interpolation) allows these **SSM-stored config values** to be referenced directly in the Compose file, ensuring the same configuration works in local and cloud environments. - **Defang provisions managed Postgres and Redis instances automatically** when using the `x-defang-postgres` and `x-defang-redis` extensions, securely injecting credentials where needed with variable interpolation. - [**Deployment modes**](/docs/concepts/deployment-modes) (`affordable`, `balanced`, `high_availability`) adjust infrastructure settings across our dev/staging/prod deployments making sure dev is low cost, and production is secure and resilient. - When we first set up the portal (before we even released the private beta) DNS and certs had to be managed outside the Defang context. Now, **[we can provision certs using ACM or Let’s Encrypt](/docs/concepts/domains)**. - **Previously**: GitHub Actions ran custom infra-as-code scripts. - **Now**: The [**Defang GitHub Action**](https://github.com/DefangLabs/defang-github-action) installs Defang automatically and runs `defang compose up`, simplifying deployment. - **Result**: A streamlined, repeatable CI/CD pipeline. 
---", + "path": "/blog/2025/03/14/deploying-defang-with-defang-part-1" }, { - "id": 105, + "id": 169, "about": "**The Takeaway: Why This Matters**", "text": "By transitioning to **fully Compose-based deployments**, we: * ✅ **Eliminated hundreds of lines of Infra-as-Code** * ✅ **Simplified configuration management** with secure, environment-aware secrets handling * ✅ **Streamlined CI/CD** with a lightweight GitHub Actions workflow * ✅ **Simplified DNS and cert management** Every sample project we built, every conversation we had with developers, and every challenge we encountered with the Portal helped us get to this point where we could focus on closing the gaps last few gaps to deploying everything from a Compose file.", - "path": "/blog/2025-03-14-deploying-defang-with-defang-part-1" + "path": "/blog/2025/03/14/deploying-defang-with-defang-part-1" }, { - "id": 106, + "id": 170, "about": "Townhall", "text": "If you're excited about what's coming next and want to hear more about our vision for the future, join us for our Townhall on August 21st. We'll be sharing more about our roadmap and what we're working on next. We'll also be making sure to take time to answer any questions you have, hear your feedback, and learn more about what you want from Defang! **[Register here](https://lu.ma/rlj13eq5)** --- We’re excited to keep improving Defang to make it the easiest way for you to Develop, Deploy, and Debug cloud application. Stay tuned for more updates next month.", - "path": "/blog/2024-07-31-july-product-updates-2" + "path": "/blog/2024/07/31/july-product-updates-2" }, { - "id": 107, + "id": 171, "about": "Prerequisites", - "text": "Before we dive into the details, let's make sure you have everything you need to get started: 1. **Install Defang CLI:** Simplify your deployment process by installing the Defang CLI tool. Follow the instructions [here](https://docs.defang.io/docs/getting-started#install-the-defang-cli) to get it up and running quickly. 2. 
**Slack API Token:** Create a Slack App at https://api.slack.com/apps, granting it the necessary permissions, including the bot `chat:write` scope. ![screenshot of the slack admin UI showing the bot scopes](/img/slackbot-sample/scopes.png) 3. **Install the app in your workspace:** You'll need to install the app in your workspace for it to work. Click the \"Install to Workspace\" button in the Slack admin UI to do this. Mine says \"Reinstall\" because I've already installed it. ![screenshot of the slack admin UI showing the install button](/img/slackbot-sample/install-app.png) 4. **Copy the Bot User OAuth Access Token:** This token will authenticate your Slackbot with the Slack API. ![screenshot of the slack admin UI showing the auth token field](/img/slackbot-sample/token.png) 5. **Invite the Bot to a Channel:** To enable your Slackbot, invite it to the desired channel using the `@` command. In the screenshot below, my bot's name actually starts with the word invite, but if your bot is called `mycoolbot` you would invite it with `@mycoolbot`. This ensures your Slackbot has the required permissions to interact with the channel. ![screenshot of the slack chat UI showing me inviting my bot](/img/slackbot-sample/invite.png) 6. **Clone the Sample Code:** Clone the Defang repository and navigate to the `samples/golang/slackbot` directory. This directory contains the sample code for the Slackbot. ```bash git clone https://github.com/DefangLabs/defang cd defang/samples/golang/slackbot ```", - "path": "/blog/2024-03-28-slackbot-sample" + "text": "Before we dive into the details, let's make sure you have everything you need to get started: 1. **Install Defang CLI:** Simplify your deployment process by installing the Defang CLI tool. Follow the instructions [here](/docs/getting-started#install-the-defang-cli) to get it up and running quickly. 2. 
**Slack API Token:** Create a Slack App at https://api.slack.com/apps, granting it the necessary permissions, including the bot `chat:write` scope. ![screenshot of the slack admin UI showing the bot scopes](/img/slackbot-sample/scopes.png) 3. **Install the app in your workspace:** You'll need to install the app in your workspace for it to work. Click the \"Install to Workspace\" button in the Slack admin UI to do this. Mine says \"Reinstall\" because I've already installed it. ![screenshot of the slack admin UI showing the install button](/img/slackbot-sample/install-app.png) 4. **Copy the Bot User OAuth Access Token:** This token will authenticate your Slackbot with the Slack API. ![screenshot of the slack admin UI showing the auth token field](/img/slackbot-sample/token.png) 5. **Invite the Bot to a Channel:** To enable your Slackbot, invite it to the desired channel using the `@` command. In the screenshot below, my bot's name actually starts with the word invite, but if your bot is called `mycoolbot` you would invite it with `@mycoolbot`. This ensures your Slackbot has the required permissions to interact with the channel. ![screenshot of the slack chat UI showing me inviting my bot](/img/slackbot-sample/invite.png) 6. **Clone the Sample Code:** Clone the Defang repository and navigate to the `samples/golang/slackbot` directory. This directory contains the sample code for the Slackbot. ```bash git clone https://github.com/DefangLabs/defang cd defang/samples/golang/slackbot ```", + "path": "/blog/2024/03/28/slackbot-sample" }, { - "id": 108, + "id": 172, "about": "Deployment Steps", "text": "Now that we have everything set up, let's dive into the deployment process. Follow these steps to deploy your Slackbot effortlessly: 1. **Set Up Secrets:** Prioritize security by configuring environment variables as sensitive config. Use the Defang CLI's `defang config set` command to set the `SLACK_TOKEN` and `SLACK_CHANNEL_ID` configs. 
Replace `your_slack_token` and `your_slack_channel_id` with the respective values: ```bash defang config set --name SLACK_TOKEN --value your_slack_token defang config set --name SLACK_CHANNEL_ID --value your_slack_channel_id ``` 2. **Deploy the Slackbot:** Use the Defang CLI's `defang compose up` command to deploy.", - "path": "/blog/2024-03-28-slackbot-sample" + "path": "/blog/2024/03/28/slackbot-sample" }, { - "id": 109, + "id": 173, "about": "Usage", "text": "With your Slackbot up and running, let's explore how to make the most of it. Let's send a POST request to the `/` endpoint with a JSON body containing the message you want to post to the Slack channel. Popular tools like cURL or Postman can help you send the request: ```bash curl 'https://raphaeltm-bot--8080.prod1.defang.dev/' \\ -H 'content-type: application/json' \\ --data-raw $'{\"message\":\"This is your bot speaking. We\\'ll be landing in 10 minutes. Please fasten your seatbelts.\"}' ```", - "path": "/blog/2024-03-28-slackbot-sample" + "path": "/blog/2024/03/28/slackbot-sample" }, { - "id": 110, + "id": 174, "about": "Takeaways", "text": "Congratulations! You've successfully deployed a Slackbot using Defang. If you deployed this as an internal service, you could use it to send status updates, alerts, or other important messages to your team. The possibilities are endless!", - "path": "/blog/2024-03-28-slackbot-sample" + "path": "/blog/2024/03/28/slackbot-sample" }, { - "id": 111, + "id": 175, "about": "Events and Adoption", "text": "In October, the Defang team was actively involved in a range of exciting events. We participated in [MLH Cloud Week](https://ghw.mlh.io/events/cloud), [StormHacks](https://stormhacks.com/), and hosted a DevTools Vancouver [meetup](https://lu.ma/devtools2), bringing together local DevTool founders, engineers, and enthusiasts. It was inspiring to see Defang in action, helping these hackers build their amazing projects. 
![DevToolsMeetup](/img/october-update/devtools-meetup.jpg) When we shipped our Public Beta earlier in 2024, we had a goal to reach **1000 users** by end of year. **We are pleased to announce that we have already reached this milestone a couple of months in advance!** We are excited to see the momentum behind the product and how our users are using Defang for developing and deploying a variety of different applications. Thank you for your support!", - "path": "/blog/2024-11-13-october-product-updates" + "path": "/blog/2024/11/13/october-product-updates" }, { - "id": 112, + "id": 176, "about": "The Road Ahead", "text": "The team is now heads-down dotting the i’s and crossing the t’s so we can release Defang V1 before end of year. This will enable customers to use Defang for production workloads. We look forward to sharing more in our next monthly update. --- \"CoffeeChat\" --- As always, we appreciate your feedback and are committed to making Defang the easiest way to develop, deploy, and debug your cloud applications. Go build something awesome! 🚀", - "path": "/blog/2024-11-13-october-product-updates" + "path": "/blog/2024/11/13/october-product-updates" }, { - "id": 113, + "id": 177, "about": "Ask Defang", "text": "We've just rolled out an initial version of Ask Defang, our AI-driven documentation assistant that can help you navigate the Defang docs and get deploying faster. It's deployed with Defang and using a combination of embeddings run on a GPU-powered service, Defang managed Redis, and OpenAI APIs. Check it out [here](/docs/ask) or head to [ask.defang.io](https://ask.defang.io) to get started!", - "path": "/blog/2024-08-30-august-product-updates" + "path": "/blog/2024/08/30/august-product-updates" }, { - "id": 114, + "id": 178, "about": "Enhanced Platform Symmetry", "text": "We've been hard at work improving the symmetry between our Playground and BYOC environments as well as `docker compose up` vs `defang compose up` to provide a more seamless experience. 
You can now use multiple compose files and in BYOC the CLI will subscribe to events to monitor service health during deployments.", - "path": "/blog/2024-08-30-august-product-updates" + "path": "/blog/2024/08/30/august-product-updates" }, { - "id": 115, + "id": 179, "about": "AI Features and Debugging Improvements", "text": "We've been working on several AI-driven enhancements to make your development process even more efficient. We've addressed key issues, such as file generation failures due to parsing errors and invalid compose files. Also, our Debug Assistant is now more robust, handling context size limits better than before. --- ![Workshop](/img/august-update/workshop.jpg)", - "path": "/blog/2024-08-30-august-product-updates" + "path": "/blog/2024/08/30/august-product-updates" }, { - "id": 116, + "id": 180, "about": "August Events", "text": "This month, we hosted an in-person [workshop](https://www.linkedin.com/feed/update/urn:li:activity:7233915052083310595/) to get more developers acquainted with Defang and simplify their cloud app journey. You can view the presentation [here](https://s.defang.io/cloudjam2024). We also held the first-ever DevTools Vancouver [meetup](https://www.linkedin.com/posts/defanglabs_devtoolsmeetup-activity-7234599713751060483-zQsg?utm_source=share&utm_medium=member_desktop), bringing together local DevTool founders, engineers, and enthusiasts. Looking ahead, we’re excited to collaborate with [MLH](https://mlh.io/), [Hack the North](https://hackthenorth.com/), [LangaraHacks](https://lhacks.langaracs.ca/), and more in September. If you'd like to attend any of these events, be sure to sign up! --- \"TownHall\" ---", - "path": "/blog/2024-08-30-august-product-updates" + "path": "/blog/2024/08/30/august-product-updates" }, { - "id": 117, + "id": 181, "about": "Roadmap", "text": "We're working on a number of new features to make Defang even better. 
Here are some of the most exciting ones: - **Managed Postgres**: We're working on getting Defang to provision managed Postgres services for you, so you can easily store and query data in your applications. - **DigitalOcean BYOC**: We're working on adding DigitalOcean BYOC to give you even more choice over where you deploy your applications. --- We’re excited to keep enhancing Defang to make it the easiest way for you to Develop, Deploy, and Debug cloud applications. Stay tuned for more updates next month! 🚀", - "path": "/blog/2024-08-30-august-product-updates" + "path": "/blog/2024/08/30/august-product-updates" } -] +] \ No newline at end of file