From 5587da515e970d381be785b8471ee2fee2277ad3 Mon Sep 17 00:00:00 2001 From: Andy Garcia Date: Tue, 4 Nov 2025 14:47:58 -0800 Subject: [PATCH 01/31] Revert PR #1: Garcia/change to operator (#18) --- README.md | 14 +- deploy/client/values.yaml | 2 +- deploy/coprocessor/values.yaml | 4 +- .../ROUTER_CONFIG_MIGRATION.md | 188 -------------- deploy/operator-resources/apply-resources.sh | 37 --- deploy/operator-resources/ingress-dev.yaml | 12 - deploy/operator-resources/ingress-prod.yaml | 14 - deploy/operator-resources/supergraph-dev.yaml | 19 -- .../operator-resources/supergraph-prod.yaml | 19 -- .../supergraphschema-dev.yaml | 13 - .../supergraphschema-dev.yaml.template | 13 - .../supergraphschema-prod.yaml | 13 - .../supergraphschema-prod.yaml.template | 13 - .../deploy => deploy/router}/.helmignore | 0 deploy/router/Chart.lock | 6 + .../deploy => deploy/router}/Chart.yaml | 11 +- deploy/router/charts/router-1.33.0.tgz | Bin 0 -> 9411 bytes deploy/router/environments/dev.yaml | 5 + deploy/router/environments/prod.yaml | 58 +++++ deploy/router/rhai/client_id.rhai | 32 +++ deploy/router/rhai/main.rhai | 12 + deploy/router/templates/backendconfig.yaml | 12 + deploy/router/templates/ingress.yaml | 17 ++ deploy/router/templates/rhai-config.yaml | 8 + deploy/router/values.yaml | 65 +++++ .../deploy => deploy/subgraph}/.helmignore | 0 .../deploy => deploy/subgraph}/Chart.yaml | 0 .../subgraph}/environments/dev.yaml | 0 .../subgraph}/environments/prod.yaml | 0 .../subgraph}/templates/NOTES.txt | 0 .../subgraph}/templates/_helpers.tpl | 0 .../subgraph}/templates/deployment.yaml | 0 .../subgraph}/templates/hpa.yaml | 0 .../subgraph}/templates/ingress.yaml | 0 .../subgraph}/templates/service.yaml | 0 .../subgraph}/templates/serviceaccount.yaml | 0 .../templates/tests/test-connection.yaml | 0 .../deploy => deploy/subgraph}/values.yaml | 2 +- docs/cleanup.md | 131 +--------- docs/operator-guide.md | 239 ------------------ docs/setup.md | 158 +++--------- subgraphs/checkout/Dockerfile | 5 +- subgraphs/checkout/deploy/values.yaml | 43 ---- subgraphs/checkout/k8s/subgraph-dev.yaml | 15 -- subgraphs/checkout/k8s/subgraph-prod.yaml | 15 -- subgraphs/discovery/Dockerfile | 5 +- subgraphs/discovery/deploy/Chart.yaml | 24 -- .../discovery/deploy/environments/dev.yaml | 3 - .../discovery/deploy/environments/prod.yaml | 6 - .../discovery/deploy/templates/NOTES.txt | 22 -- .../discovery/deploy/templates/_helpers.tpl | 62 ----- .../deploy/templates/deployment.yaml | 57 ----- subgraphs/discovery/deploy/templates/hpa.yaml | 28 -- .../discovery/deploy/templates/ingress.yaml | 61 ----- .../discovery/deploy/templates/service.yaml | 15 -- .../deploy/templates/serviceaccount.yaml | 12 - .../templates/tests/test-connection.yaml | 15 -- subgraphs/discovery/k8s/subgraph-dev.yaml | 14 - subgraphs/discovery/k8s/subgraph-prod.yaml | 14 - subgraphs/inventory/Dockerfile | 5 +- subgraphs/inventory/deploy/.helmignore | 23 -- .../inventory/deploy/environments/dev.yaml | 3 - .../inventory/deploy/environments/prod.yaml | 6 - .../inventory/deploy/templates/NOTES.txt | 22 -- .../inventory/deploy/templates/_helpers.tpl | 62 ----- .../deploy/templates/deployment.yaml | 57 ----- subgraphs/inventory/deploy/templates/hpa.yaml | 28 -- .../inventory/deploy/templates/ingress.yaml | 61 ----- .../inventory/deploy/templates/service.yaml | 15 -- .../deploy/templates/serviceaccount.yaml | 12 - .../templates/tests/test-connection.yaml | 15 -- subgraphs/inventory/deploy/values.yaml | 43 ---- subgraphs/inventory/k8s/subgraph-dev.yaml | 14 - 
subgraphs/inventory/k8s/subgraph-prod.yaml | 14 - subgraphs/orders/Dockerfile | 5 +- subgraphs/orders/deploy/.helmignore | 23 -- subgraphs/orders/deploy/Chart.yaml | 24 -- subgraphs/orders/deploy/environments/dev.yaml | 3 - .../orders/deploy/environments/prod.yaml | 6 - subgraphs/orders/deploy/templates/NOTES.txt | 22 -- .../orders/deploy/templates/_helpers.tpl | 62 ----- .../orders/deploy/templates/deployment.yaml | 57 ----- subgraphs/orders/deploy/templates/hpa.yaml | 28 -- .../orders/deploy/templates/ingress.yaml | 61 ----- .../orders/deploy/templates/service.yaml | 15 -- .../deploy/templates/serviceaccount.yaml | 12 - .../templates/tests/test-connection.yaml | 15 -- subgraphs/orders/deploy/values.yaml | 43 ---- subgraphs/orders/k8s/subgraph-dev.yaml | 14 - subgraphs/orders/k8s/subgraph-prod.yaml | 14 - subgraphs/products/Dockerfile | 5 +- subgraphs/products/deploy/.helmignore | 23 -- subgraphs/products/deploy/Chart.yaml | 24 -- .../products/deploy/environments/dev.yaml | 3 - .../products/deploy/environments/prod.yaml | 6 - subgraphs/products/deploy/templates/NOTES.txt | 22 -- .../products/deploy/templates/_helpers.tpl | 62 ----- .../products/deploy/templates/deployment.yaml | 57 ----- subgraphs/products/deploy/templates/hpa.yaml | 28 -- .../products/deploy/templates/ingress.yaml | 61 ----- .../products/deploy/templates/service.yaml | 15 -- .../deploy/templates/serviceaccount.yaml | 12 - .../templates/tests/test-connection.yaml | 15 -- subgraphs/products/deploy/values.yaml | 43 ---- subgraphs/products/k8s/subgraph-dev.yaml | 14 - subgraphs/products/k8s/subgraph-prod.yaml | 14 - subgraphs/reviews/Dockerfile | 5 +- subgraphs/reviews/deploy/.helmignore | 23 -- subgraphs/reviews/deploy/Chart.yaml | 24 -- .../reviews/deploy/environments/dev.yaml | 3 - .../reviews/deploy/environments/prod.yaml | 6 - subgraphs/reviews/deploy/templates/NOTES.txt | 22 -- .../reviews/deploy/templates/_helpers.tpl | 62 ----- .../reviews/deploy/templates/deployment.yaml | 57 ----- subgraphs/reviews/deploy/templates/hpa.yaml | 28 -- .../reviews/deploy/templates/ingress.yaml | 61 ----- .../reviews/deploy/templates/service.yaml | 15 -- .../deploy/templates/serviceaccount.yaml | 12 - .../templates/tests/test-connection.yaml | 15 -- subgraphs/reviews/deploy/values.yaml | 43 ---- subgraphs/reviews/k8s/subgraph-dev.yaml | 14 - subgraphs/reviews/k8s/subgraph-prod.yaml | 14 - subgraphs/shipping/Dockerfile | 5 +- subgraphs/shipping/deploy/.helmignore | 23 -- subgraphs/shipping/deploy/Chart.yaml | 24 -- .../shipping/deploy/environments/dev.yaml | 3 - .../shipping/deploy/environments/prod.yaml | 6 - subgraphs/shipping/deploy/templates/NOTES.txt | 22 -- .../shipping/deploy/templates/_helpers.tpl | 62 ----- .../shipping/deploy/templates/deployment.yaml | 57 ----- subgraphs/shipping/deploy/templates/hpa.yaml | 28 -- .../shipping/deploy/templates/ingress.yaml | 61 ----- .../shipping/deploy/templates/service.yaml | 15 -- .../deploy/templates/serviceaccount.yaml | 12 - .../templates/tests/test-connection.yaml | 15 -- subgraphs/shipping/deploy/values.yaml | 43 ---- subgraphs/shipping/k8s/subgraph-dev.yaml | 14 - subgraphs/shipping/k8s/subgraph-prod.yaml | 14 - subgraphs/users/Dockerfile | 8 +- subgraphs/users/deploy/.helmignore | 23 -- subgraphs/users/deploy/Chart.yaml | 24 -- subgraphs/users/deploy/environments/dev.yaml | 3 - subgraphs/users/deploy/environments/prod.yaml | 6 - subgraphs/users/deploy/templates/NOTES.txt | 22 -- subgraphs/users/deploy/templates/_helpers.tpl | 62 ----- .../users/deploy/templates/deployment.yaml | 57 
----- subgraphs/users/deploy/templates/hpa.yaml | 28 -- subgraphs/users/deploy/templates/ingress.yaml | 61 ----- subgraphs/users/deploy/templates/service.yaml | 15 -- .../deploy/templates/serviceaccount.yaml | 12 - .../templates/tests/test-connection.yaml | 15 -- subgraphs/users/deploy/values.yaml | 43 ---- subgraphs/users/k8s/subgraph-dev.yaml | 14 - subgraphs/users/k8s/subgraph-prod.yaml | 14 - terraform/aws/create_graph.sh | 105 ++------ terraform/aws/setup_clusters.sh | 97 +++---- terraform/gcp/.terraform.lock.hcl | 167 ++++++------ terraform/gcp/create_graph.sh | 105 ++------ terraform/gcp/setup_clusters.sh | 37 --- terraform/minikube/create_graph.sh | 105 ++------ terraform/minikube/setup_clusters.sh | 75 +----- 161 files changed, 493 insertions(+), 4134 deletions(-) delete mode 100644 deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md delete mode 100755 deploy/operator-resources/apply-resources.sh delete mode 100644 deploy/operator-resources/ingress-dev.yaml delete mode 100644 deploy/operator-resources/ingress-prod.yaml delete mode 100644 deploy/operator-resources/supergraph-dev.yaml delete mode 100644 deploy/operator-resources/supergraph-prod.yaml delete mode 100644 deploy/operator-resources/supergraphschema-dev.yaml delete mode 100644 deploy/operator-resources/supergraphschema-dev.yaml.template delete mode 100644 deploy/operator-resources/supergraphschema-prod.yaml delete mode 100644 deploy/operator-resources/supergraphschema-prod.yaml.template rename {subgraphs/checkout/deploy => deploy/router}/.helmignore (100%) create mode 100644 deploy/router/Chart.lock rename {subgraphs/inventory/deploy => deploy/router}/Chart.yaml (87%) create mode 100644 deploy/router/charts/router-1.33.0.tgz create mode 100644 deploy/router/environments/dev.yaml create mode 100644 deploy/router/environments/prod.yaml create mode 100644 deploy/router/rhai/client_id.rhai create mode 100644 deploy/router/rhai/main.rhai create mode 100644 deploy/router/templates/backendconfig.yaml create mode 100644 deploy/router/templates/ingress.yaml create mode 100644 deploy/router/templates/rhai-config.yaml create mode 100644 deploy/router/values.yaml rename {subgraphs/discovery/deploy => deploy/subgraph}/.helmignore (100%) rename {subgraphs/checkout/deploy => deploy/subgraph}/Chart.yaml (100%) rename {subgraphs/checkout/deploy => deploy/subgraph}/environments/dev.yaml (100%) rename {subgraphs/checkout/deploy => deploy/subgraph}/environments/prod.yaml (100%) rename {subgraphs/checkout/deploy => deploy/subgraph}/templates/NOTES.txt (100%) rename {subgraphs/checkout/deploy => deploy/subgraph}/templates/_helpers.tpl (100%) rename {subgraphs/checkout/deploy => deploy/subgraph}/templates/deployment.yaml (100%) rename {subgraphs/checkout/deploy => deploy/subgraph}/templates/hpa.yaml (100%) rename {subgraphs/checkout/deploy => deploy/subgraph}/templates/ingress.yaml (100%) rename {subgraphs/checkout/deploy => deploy/subgraph}/templates/service.yaml (100%) rename {subgraphs/checkout/deploy => deploy/subgraph}/templates/serviceaccount.yaml (100%) rename {subgraphs/checkout/deploy => deploy/subgraph}/templates/tests/test-connection.yaml (100%) rename {subgraphs/discovery/deploy => deploy/subgraph}/values.yaml (89%) delete mode 100644 docs/operator-guide.md delete mode 100644 subgraphs/checkout/deploy/values.yaml delete mode 100644 subgraphs/checkout/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/checkout/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/discovery/deploy/Chart.yaml delete mode 100644 
subgraphs/discovery/deploy/environments/dev.yaml delete mode 100644 subgraphs/discovery/deploy/environments/prod.yaml delete mode 100644 subgraphs/discovery/deploy/templates/NOTES.txt delete mode 100644 subgraphs/discovery/deploy/templates/_helpers.tpl delete mode 100644 subgraphs/discovery/deploy/templates/deployment.yaml delete mode 100644 subgraphs/discovery/deploy/templates/hpa.yaml delete mode 100644 subgraphs/discovery/deploy/templates/ingress.yaml delete mode 100644 subgraphs/discovery/deploy/templates/service.yaml delete mode 100644 subgraphs/discovery/deploy/templates/serviceaccount.yaml delete mode 100644 subgraphs/discovery/deploy/templates/tests/test-connection.yaml delete mode 100644 subgraphs/discovery/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/discovery/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/inventory/deploy/.helmignore delete mode 100644 subgraphs/inventory/deploy/environments/dev.yaml delete mode 100644 subgraphs/inventory/deploy/environments/prod.yaml delete mode 100644 subgraphs/inventory/deploy/templates/NOTES.txt delete mode 100644 subgraphs/inventory/deploy/templates/_helpers.tpl delete mode 100644 subgraphs/inventory/deploy/templates/deployment.yaml delete mode 100644 subgraphs/inventory/deploy/templates/hpa.yaml delete mode 100644 subgraphs/inventory/deploy/templates/ingress.yaml delete mode 100644 subgraphs/inventory/deploy/templates/service.yaml delete mode 100644 subgraphs/inventory/deploy/templates/serviceaccount.yaml delete mode 100644 subgraphs/inventory/deploy/templates/tests/test-connection.yaml delete mode 100644 subgraphs/inventory/deploy/values.yaml delete mode 100644 subgraphs/inventory/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/inventory/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/orders/deploy/.helmignore delete mode 100644 subgraphs/orders/deploy/Chart.yaml delete mode 100644 subgraphs/orders/deploy/environments/dev.yaml delete mode 100644 subgraphs/orders/deploy/environments/prod.yaml delete mode 100644 subgraphs/orders/deploy/templates/NOTES.txt delete mode 100644 subgraphs/orders/deploy/templates/_helpers.tpl delete mode 100644 subgraphs/orders/deploy/templates/deployment.yaml delete mode 100644 subgraphs/orders/deploy/templates/hpa.yaml delete mode 100644 subgraphs/orders/deploy/templates/ingress.yaml delete mode 100644 subgraphs/orders/deploy/templates/service.yaml delete mode 100644 subgraphs/orders/deploy/templates/serviceaccount.yaml delete mode 100644 subgraphs/orders/deploy/templates/tests/test-connection.yaml delete mode 100644 subgraphs/orders/deploy/values.yaml delete mode 100644 subgraphs/orders/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/orders/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/products/deploy/.helmignore delete mode 100644 subgraphs/products/deploy/Chart.yaml delete mode 100644 subgraphs/products/deploy/environments/dev.yaml delete mode 100644 subgraphs/products/deploy/environments/prod.yaml delete mode 100644 subgraphs/products/deploy/templates/NOTES.txt delete mode 100644 subgraphs/products/deploy/templates/_helpers.tpl delete mode 100644 subgraphs/products/deploy/templates/deployment.yaml delete mode 100644 subgraphs/products/deploy/templates/hpa.yaml delete mode 100644 subgraphs/products/deploy/templates/ingress.yaml delete mode 100644 subgraphs/products/deploy/templates/service.yaml delete mode 100644 subgraphs/products/deploy/templates/serviceaccount.yaml delete mode 100644 subgraphs/products/deploy/templates/tests/test-connection.yaml delete mode 100644 
subgraphs/products/deploy/values.yaml delete mode 100644 subgraphs/products/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/products/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/reviews/deploy/.helmignore delete mode 100644 subgraphs/reviews/deploy/Chart.yaml delete mode 100644 subgraphs/reviews/deploy/environments/dev.yaml delete mode 100644 subgraphs/reviews/deploy/environments/prod.yaml delete mode 100644 subgraphs/reviews/deploy/templates/NOTES.txt delete mode 100644 subgraphs/reviews/deploy/templates/_helpers.tpl delete mode 100644 subgraphs/reviews/deploy/templates/deployment.yaml delete mode 100644 subgraphs/reviews/deploy/templates/hpa.yaml delete mode 100644 subgraphs/reviews/deploy/templates/ingress.yaml delete mode 100644 subgraphs/reviews/deploy/templates/service.yaml delete mode 100644 subgraphs/reviews/deploy/templates/serviceaccount.yaml delete mode 100644 subgraphs/reviews/deploy/templates/tests/test-connection.yaml delete mode 100644 subgraphs/reviews/deploy/values.yaml delete mode 100644 subgraphs/reviews/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/reviews/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/shipping/deploy/.helmignore delete mode 100644 subgraphs/shipping/deploy/Chart.yaml delete mode 100644 subgraphs/shipping/deploy/environments/dev.yaml delete mode 100644 subgraphs/shipping/deploy/environments/prod.yaml delete mode 100644 subgraphs/shipping/deploy/templates/NOTES.txt delete mode 100644 subgraphs/shipping/deploy/templates/_helpers.tpl delete mode 100644 subgraphs/shipping/deploy/templates/deployment.yaml delete mode 100644 subgraphs/shipping/deploy/templates/hpa.yaml delete mode 100644 subgraphs/shipping/deploy/templates/ingress.yaml delete mode 100644 subgraphs/shipping/deploy/templates/service.yaml delete mode 100644 subgraphs/shipping/deploy/templates/serviceaccount.yaml delete mode 100644 subgraphs/shipping/deploy/templates/tests/test-connection.yaml delete mode 100644 subgraphs/shipping/deploy/values.yaml delete mode 100644 subgraphs/shipping/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/shipping/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/users/deploy/.helmignore delete mode 100644 subgraphs/users/deploy/Chart.yaml delete mode 100644 subgraphs/users/deploy/environments/dev.yaml delete mode 100644 subgraphs/users/deploy/environments/prod.yaml delete mode 100644 subgraphs/users/deploy/templates/NOTES.txt delete mode 100644 subgraphs/users/deploy/templates/_helpers.tpl delete mode 100644 subgraphs/users/deploy/templates/deployment.yaml delete mode 100644 subgraphs/users/deploy/templates/hpa.yaml delete mode 100644 subgraphs/users/deploy/templates/ingress.yaml delete mode 100644 subgraphs/users/deploy/templates/service.yaml delete mode 100644 subgraphs/users/deploy/templates/serviceaccount.yaml delete mode 100644 subgraphs/users/deploy/templates/tests/test-connection.yaml delete mode 100644 subgraphs/users/deploy/values.yaml delete mode 100644 subgraphs/users/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/users/k8s/subgraph-prod.yaml diff --git a/README.md b/README.md index 9130016..f1f14a4 100644 --- a/README.md +++ b/README.md @@ -4,14 +4,14 @@ This repository contains a reference architecture utilizing [Kubernetes](https:/ Once the architecture is fully stood up, you'll have: -- An Apollo Router running and managed by the [Apollo GraphOS Operator](https://www.apollographql.com/docs/apollo-operator/), utilizing: +- An Apollo Router running utilizing: - [Persisted Queries for safelisting 
operations](https://www.apollographql.com/docs/router/configuration/persisted-queries/#differences-from-automatic-persisted-queries) - [A coprocessor for handling customizations outside of the router](https://www.apollographql.com/docs/router/customizations/coprocessor) - [Rhai scripts to do basic customizations within the router container](https://www.apollographql.com/docs/router/customizations/rhai) - [Authorization/Authentication directives](https://www.apollographql.com/docs/router/configuration/authorization) -- Eight subgraphs, each handling a portion of the overall supergraph schema, with schemas automatically published to GraphOS via the operator +- Eight subgraphs, each handling a portion of the overall supergraph schema - A React-based frontend application utilizing Apollo Client -- Apollo GraphOS Operator for automated schema publishing, composition, and deployment +- GitHub Actions to automate image building and GraphOS-specific implementations, including schema publishing and persisted query manifest creation/publishing - Tools to run k6 load tests against the architecture from within the same cluster ### The ending architecture @@ -45,14 +45,6 @@ During setup, you'll be: - Provisioning resources - Deploying the applications, including router, subgraphs, client, and observability tools -### [Operator Guide](/docs/operator-guide.md) - -Learn how the Apollo GraphOS Operator works in this architecture, including: -- Schema publishing and composition flow -- Monitoring operator-managed resources -- Troubleshooting common issues -- Updating router configuration - ### [Cleanup](/docs/cleanup.md) Once finished, you can cleanup your environments following the above document. diff --git a/deploy/client/values.yaml b/deploy/client/values.yaml index 40a3c89..80c14f5 100644 --- a/deploy/client/values.yaml +++ b/deploy/client/values.yaml @@ -1,7 +1,7 @@ replicaCount: 3 image: - repository: ghcr.io/${GITHUB_ORG}/reference-architecture/client + repository: ghcr.io/apollosolutions/reference-architecture/client pullPolicy: Always tag: main diff --git a/deploy/coprocessor/values.yaml b/deploy/coprocessor/values.yaml index b37c6a1..d0ee01f 100644 --- a/deploy/coprocessor/values.yaml +++ b/deploy/coprocessor/values.yaml @@ -1,9 +1,7 @@ -namespace: apollo - replicaCount: 3 image: - repository: ghcr.io/${GITHUB_ORG}/reference-architecture/coprocessor + repository: ghcr.io/apollosolutions/reference-architecture/coprocessor pullPolicy: Always tag: main diff --git a/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md b/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md deleted file mode 100644 index 1eb962c..0000000 --- a/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md +++ /dev/null @@ -1,188 +0,0 @@ -# Router Configuration Migration Guide - -This document describes how the router configuration from `deploy/router/values.yaml` was migrated to operator-managed Supergraph CRDs. 
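For orientation, the sketch below mirrors the `deploy/operator-resources/supergraph-dev.yaml` resource removed by this change and marks, as a comment, where the migrated router settings from the mapping table further down would sit. The configuration placement is illustrative only; as noted under "Current Configuration Status" later in this document, the simplified CRDs in this repository do not carry a router configuration block.

```yaml
apiVersion: apollographql.com/v1alpha2
kind: Supergraph
metadata:
  name: reference-architecture-dev
  namespace: apollo
spec:
  replicas: 1
  serviceName: router
  podTemplate:
    routerVersion: 1.37.0
    resources:
      requests:
        cpu: 100m
        memory: 256Mi
    # Per the Configuration Mapping table below, migrated settings would live under
    # spec.podTemplate.router.configuration (e.g. health_check.listen: 0.0.0.0:8080).
    # The simplified CRDs shipped in this repository omit this block.
  schema:
    resource:
      name: reference-architecture-dev
      namespace: apollo
```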
- -## Migration Summary - -All router configuration has been moved from Helm values (`deploy/router/values.yaml`) into the Supergraph CRD specifications: -- `deploy/operator-resources/supergraph-dev.yaml` (dev environment) -- `deploy/operator-resources/supergraph-prod.yaml` (prod environment) - -## Configuration Mapping - -### Core Router Settings (Both Dev and Prod) - -| Previous Location | New Location | Value | -|-------------------|--------------|-------| -| `router.configuration.health_check` | `spec.podTemplate.router.configuration.health_check` | `listen: 0.0.0.0:8080` | -| `router.configuration.sandbox` | `spec.podTemplate.router.configuration.sandbox` | `enabled: true` | -| `router.configuration.homepage` | `spec.podTemplate.router.configuration.homepage` | `enabled: false` | -| `router.configuration.supergraph` | `spec.podTemplate.router.configuration.supergraph` | `introspection: true` | -| `router.configuration.include_subgraph_errors` | `spec.podTemplate.router.configuration.include_subgraph_errors` | `all: true` | -| `router.configuration.plugins` | `spec.podTemplate.router.configuration.plugins` | `experimental.expose_query_plan: true` | - -### Authentication & Authorization - -- **JWKS Authentication**: Points to `http://graphql.users.svc.cluster.local:4001/.well-known/jwks.json` -- **Authorization Preview Directives**: Enabled for all subgraphs - -### Coprocessor Configuration - -- **URL**: `http://coprocessor.coprocessor.svc.cluster.local:8081` -- **Timeout**: 2s -- **Router Request Headers**: Enabled -- **Subgraph Request/Response Headers**: Enabled - -### Rhai Scripts - -Rhai scripts are handled via ConfigMap and volume mounts: -- **Scripts Location**: `/dist/rhai` (mounted from ConfigMap) -- **Main Script**: `main.rhai` -- **Helper Scripts**: `client_id.rhai` - -The ConfigMap must be created separately: -```bash -kubectl create configmap rhai-config --from-file=deploy/router/rhai/ -n apollo -``` - -### Prod-Only Configuration - -The following configurations are only present in `supergraph-prod.yaml`: - -#### Persisted Queries - -```yaml -persisted_queries: - enabled: true - log_unknown: true - safelist: - enabled: false - require_id: false -``` - -#### Telemetry - -- **Apollo Field-Level Instrumentation**: Sampler 0.5 -- **OTLP Tracing**: gRPC endpoint `http://collector.monitoring:4317` -- **OTLP Metrics**: gRPC endpoint `http://collector.monitoring:4317` -- **Service Name**: "router" -- **Service Namespace**: "router" - -## How to Update Router Configuration - -To update router configuration without redeploying subgraphs: - -1. Edit the appropriate Supergraph CRD file: - - Dev: `deploy/operator-resources/supergraph-dev.yaml` - - Prod: `deploy/operator-resources/supergraph-prod.yaml` - -2. Update the `spec.podTemplate.router.configuration` section - -3. Apply the changes: - ```bash - kubectl apply -f deploy/operator-resources/supergraph-{dev|prod}.yaml - ``` - -4. The operator will automatically trigger a router rollover with the new configuration - -## Resources - -Dev environment uses minimal resources: -- CPU: 100m -- Memory: 256Mi -- Replicas: 1 - -Prod environment uses production-grade resources: -- CPU: 500m -- Memory: 512Mi -- Replicas: 3 - -## Differences from Helm Chart - -The operator-managed approach differs from the Helm chart in several ways: - -1. **No Helm templates**: Configuration is defined in Kubernetes-native CRDs -2. **Automatic rollover**: The operator handles rolling out changes to the router -3. 
**Declarative**: All configuration is version-controlled in YAML files -4. **Condition-based**: Can monitor router status via `kubectl get supergraph` - -## Troubleshooting - -### Router not picking up changes - -Check the Supergraph status: -```bash -kubectl describe supergraph reference-architecture-{dev|prod} -n apollo -``` - -Look for: -- `SchemaLoaded`: Should be `True` -- `Progressing`: Shows deployment status -- `Ready`: Should be `True` when fully deployed - -### Rhai scripts not working - -Verify the ConfigMap exists and is mounted: -```bash -kubectl get configmap rhai-config -n apollo -kubectl describe pod -n apollo | grep rhai-volume -``` - -### Coprocessor connection issues - -Ensure coprocessor is running and accessible: -```bash -kubectl get pods -n coprocessor -kubectl get svc -n coprocessor -``` - -## Current Configuration Status - -The Supergraph CRDs in this repository use a **simplified configuration** that does not include all the advanced router settings from the original Helm chart. This is because the current Apollo GraphOS Operator CRD does not support all configuration fields. - -### Supported Configuration -- ✅ Replicas count -- ✅ Router version -- ✅ Resource limits/requests -- ✅ Schema source (SupergraphSchema resource reference) - -### Not Currently Supported in Supergraph CRD -- ❌ Custom router configuration (JWKS auth, coprocessor, CORS, etc.) -- ❌ Rhai scripts via ConfigMap volumes -- ❌ Custom ingress configuration -- ❌ Service type customization -- ❌ Telemetry exporters -- ❌ Advanced authentication/authorization - -### Operator API Key Setup - -The operator requires an **Operator API key** (not a personal API key). To create one: - -1. Go to GraphOS Studio -2. Navigate to your graph → Settings → API Keys -3. Create a new API key with "Operator" role -4. Update the `apollo-api-key` secret with the Operator API key: - ```bash - kubectl create secret generic apollo-api-key \ - --from-literal="APOLLO_KEY=" \ - -n apollo-operator \ - --dry-run=client -o yaml | kubectl apply -f - - ``` - -### TODO: Advanced Router Configuration - -The advanced router configuration (JWKS, coprocessor, Rhai scripts, telemetry, persisted queries) from the original `deploy/router/values.yaml` has not been migrated yet. This would need to be implemented either: - -1. Via router configuration YAML file in a ConfigMap (if supported) -2. Through GraphOS Studio router configuration -3. By extending the operator to support these fields -4. By using a custom router deployment instead of the operator-managed one - -### Current Status - -- Graph is created ✅ -- Dev and prod variants created ✅ -- Subgraphs deployed and CRDs created ✅ -- Operator API key needs to be set up ⚠️ -- Advanced router configuration not yet migrated ⏳ - diff --git a/deploy/operator-resources/apply-resources.sh b/deploy/operator-resources/apply-resources.sh deleted file mode 100755 index 3967579..0000000 --- a/deploy/operator-resources/apply-resources.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -set -euo pipefail - -# This script applies the operator resources with the correct graph ID -# Usage: ./apply-resources.sh {dev|prod} - -ENVIRONMENT=${1:-dev} - -if [[ "$ENVIRONMENT" != "dev" && "$ENVIRONMENT" != "prod" ]]; then - echo "Error: Environment must be 'dev' or 'prod'" - exit 1 -fi - -# Check if TF_VAR_apollo_graph_id is set -if [[ -z "${TF_VAR_apollo_graph_id:-}" ]]; then - echo "Error: TF_VAR_apollo_graph_id is not set. Please source .env file from your terraform directory." 
- exit 1 -fi - -echo "Deploying operator resources for ${ENVIRONMENT} environment with graph ID: ${TF_VAR_apollo_graph_id}" - -# Apply SupergraphSchema with graph ID substitution -if command -v envsubst &> /dev/null; then - envsubst < "supergraphschema-${ENVIRONMENT}.yaml" | kubectl apply -f - -else - # Fallback if envsubst not available - sed "s|\${TF_VAR_apollo_graph_id}|${TF_VAR_apollo_graph_id}|g" "supergraphschema-${ENVIRONMENT}.yaml" | kubectl apply -f - -fi - -# Apply Supergraph -kubectl apply -f "supergraph-${ENVIRONMENT}.yaml" - -# Apply Ingress -kubectl apply -f "ingress-${ENVIRONMENT}.yaml" - -echo "Operator resources deployed successfully for ${ENVIRONMENT} environment" - diff --git a/deploy/operator-resources/ingress-dev.yaml b/deploy/operator-resources/ingress-dev.yaml deleted file mode 100644 index edfdbed..0000000 --- a/deploy/operator-resources/ingress-dev.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: router - namespace: apollo -spec: - defaultBackend: - service: - name: router - port: - number: 80 - diff --git a/deploy/operator-resources/ingress-prod.yaml b/deploy/operator-resources/ingress-prod.yaml deleted file mode 100644 index acbc645..0000000 --- a/deploy/operator-resources/ingress-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: router - namespace: apollo - annotations: - cloud.google.com/backend-config: '{"default": "http-hc-config"}' -spec: - defaultBackend: - service: - name: router - port: - number: 80 - diff --git a/deploy/operator-resources/supergraph-dev.yaml b/deploy/operator-resources/supergraph-dev.yaml deleted file mode 100644 index effc65c..0000000 --- a/deploy/operator-resources/supergraph-dev.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Supergraph -metadata: - name: reference-architecture-dev - namespace: apollo -spec: - replicas: 1 - serviceName: router - podTemplate: - routerVersion: 1.37.0 - resources: - requests: - cpu: 100m - memory: 256Mi - schema: - resource: - name: reference-architecture-dev - namespace: apollo - diff --git a/deploy/operator-resources/supergraph-prod.yaml b/deploy/operator-resources/supergraph-prod.yaml deleted file mode 100644 index 19f0bbc..0000000 --- a/deploy/operator-resources/supergraph-prod.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Supergraph -metadata: - name: reference-architecture-prod - namespace: apollo -spec: - replicas: 3 - serviceName: router - podTemplate: - routerVersion: 1.37.0 - resources: - requests: - cpu: 500m - memory: 512Mi - schema: - resource: - name: reference-architecture-prod - namespace: apollo - diff --git a/deploy/operator-resources/supergraphschema-dev.yaml b/deploy/operator-resources/supergraphschema-dev.yaml deleted file mode 100644 index e25f40e..0000000 --- a/deploy/operator-resources/supergraphschema-dev.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: SupergraphSchema -metadata: - name: reference-architecture-dev - namespace: apollo -spec: - graphRef: ${TF_VAR_apollo_graph_id}@dev - selectors: - - matchExpressions: - - key: apollo.io/subgraph - operator: Exists - partial: false - diff --git a/deploy/operator-resources/supergraphschema-dev.yaml.template b/deploy/operator-resources/supergraphschema-dev.yaml.template deleted file mode 100644 index aa9d798..0000000 --- a/deploy/operator-resources/supergraphschema-dev.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ 
-apiVersion: apollographql.com/v1alpha2 -kind: SupergraphSchema -metadata: - name: reference-architecture-dev - namespace: apollo -spec: - graphRef: apollo-supergraph-k8s@dev - selectors: - - matchExpressions: - - key: apollo.io/subgraph - operator: Exists - partial: false - diff --git a/deploy/operator-resources/supergraphschema-prod.yaml b/deploy/operator-resources/supergraphschema-prod.yaml deleted file mode 100644 index 7cbc87a..0000000 --- a/deploy/operator-resources/supergraphschema-prod.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: SupergraphSchema -metadata: - name: reference-architecture-prod - namespace: apollo -spec: - graphRef: ${TF_VAR_apollo_graph_id}@prod - selectors: - - matchExpressions: - - key: apollo.io/subgraph - operator: Exists - partial: false - diff --git a/deploy/operator-resources/supergraphschema-prod.yaml.template b/deploy/operator-resources/supergraphschema-prod.yaml.template deleted file mode 100644 index ea72f2e..0000000 --- a/deploy/operator-resources/supergraphschema-prod.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: SupergraphSchema -metadata: - name: reference-architecture-prod - namespace: apollo -spec: - graphRef: apollo-supergraph-k8s@prod - selectors: - - matchExpressions: - - key: apollo.io/subgraph - operator: Exists - partial: false - diff --git a/subgraphs/checkout/deploy/.helmignore b/deploy/router/.helmignore similarity index 100% rename from subgraphs/checkout/deploy/.helmignore rename to deploy/router/.helmignore diff --git a/deploy/router/Chart.lock b/deploy/router/Chart.lock new file mode 100644 index 0000000..cd7faf6 --- /dev/null +++ b/deploy/router/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: router + repository: oci://ghcr.io/apollographql/helm-charts + version: 1.11.0 +digest: sha256:7a0f28e03497e4f01d11e5bd911a63350af4a7b7dc29c8c8f036dc42daf9da13 +generated: "2023-01-11T14:19:31.938427-05:00" diff --git a/subgraphs/inventory/deploy/Chart.yaml b/deploy/router/Chart.yaml similarity index 87% rename from subgraphs/inventory/deploy/Chart.yaml rename to deploy/router/Chart.yaml index 189e683..98b3e45 100644 --- a/subgraphs/inventory/deploy/Chart.yaml +++ b/deploy/router/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -name: subgraph +name: router description: A Helm chart for Kubernetes # A chart can be either an 'application' or a 'library' chart. @@ -15,10 +15,15 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.1.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: main +appVersion: "1.37.0" + +dependencies: + - name: router + version: 1.37.0 + repository: oci://ghcr.io/apollographql/helm-charts diff --git a/deploy/router/charts/router-1.33.0.tgz b/deploy/router/charts/router-1.33.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..903d22a32eec4c0b52ddce4a9b60cdc1f5369732 GIT binary patch literal 9411 zcmV;!Bs|+6iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBxQ`|VV`2Ouran@ujAvbn+Lm(N~oxQ~pCQD5o6lCW9wo<7? zZL7OAxGj$)H}n9%&wfiUTYhx|VVKPh-r5OmOGigX(s}9Vh|xk|HrSsbCc-6}r+>No zXE+=Vcb`93{|<-4=D)+Ot>Irr&v!=KJ6kVyc6R+ zt!C%Gkq71EXUsUESpg9341-xb`pTRLL1j(kricF9> zh6!b`m(w(*@B>aTL$ZdKp*fB~Pcu&}a~uK6^OVH;^TM`f8vCQ^@n?I?`d^^5!2EY& z0IS#k==t_=)Lj2BcDEkZ|9w0g@Cr{*kqXdLK&@guDMD`pKFkOQ!U4g*_TIh;CX~&Q z5IBJeNwF;O3dbp87#4^TG)^%GL1By`N1WpXNG2#OX~96?d7dJHdFXizD+aJni%di? z^1KZQ0yx1KW)xQB!!UfJE7>4S9(aWLwFvBPn zv(NDi$EW>8U%phI1WxfB3%0Z+$>U8Cn3QFn zftX=Xbb`I3Lpa4tIrJpKnIMy;nM3l`i6|06W&1h#5ihR?yx6|l3qUE&rtqcJ?@(j-5g}Jgju@I_*>}W2NcV%WKZ@oK#ZZK ztc1@6Lx8gdVKkfLOrW&lH{J#uoFj>UB(ai353>a&-^XC2Mb1~RH!MctpNGv{p&iI1dofUx8c=r zf{&kSFZSO?@c41b?89P69SVBv`1=2GhALb);CuAR$}~ zWD)a?VU*A;T}B{SfxR|t*_9Ot_T#n|EzOnRqC7G$z%8o3TEbe83L7#+ z42cn)M1PK@5e1bBEqRVWQ@uE2nh8WQ%#J7%++_hvS^zceC^$J~Lax>?Mh@46RLC(~ zU>0ir|3y~FV2)1Zx?@-_;W-kq=2^OohBw1ZattY8BTy1dwf_TUfX^kd6#v3jF6l~V z5;!ZeO!HFNG>P$Bnn}_s$%^#|dzC-~BZ<1fYoQ5BXadfXoDxN9{a#}B@}bWFVcvSxkm#WZI$#+*}Trkb8YfZ{nGIA5btI5Z`4J%Dy* z22dN?2%ZmzBd^)2SuBlik&KAj^-9+4>y-Q!%$H`g>irrQd^x7)(J0({S*|w4*(3Pq z^O^t2Yk+nzhdIit(MZu4rwV6j&?)J!Fvgq~EXKSPj^m}4jB0&!49*(`NcTYj{Y*3b zS@*@SUMRNeJEg=Kt(TH7Oeq&ITwHlxN+vj7#wk`mH>AFWPXx6r2A|`&98*h?a*QE> z!I)%&G2%1#We~f+c#3fjqoLtLM@5=yUapBKQMLR;@q|f{rmdHr8MeJRR_d`@Nhd~# zbGahLOzIYPF#^&Bt=O-W5~<#*zYGVLfcCPC3dKkCz$J+twgxz5d<3s4UjN7MfBeR?7exT{+6=w zeU>hdDHT#>;Y%*?+@UiovOWHeX0kx@^-mnL2u21KML*es_KobP1s9kd9(kJ47+&2R z3$MRlOmmQbzXOs(L)&%NDq?tir#78J~#MG-y7smz)j4d;0ST`ROy$)q{r4vf*%{qxC zIa9sr)X)V+o!4EiH64hQoAIsC+$dRl2bSK3n$vRKutG^(O<#~YK7Gt#L6|5cr8WPp z|Lh!(4K=rUj$%6^8!$xzpP{8QN*h{w8~o;oWEHL&Cj;2U4JCg_wJ1FSGr5$% z;ByoUX)pRfnPiO{!1WpugqqA{DcN+hA%{@y z2K5S>FGEd)aE#(poF#2~w^5ZWrS%pdE9PU#m0k{;(_6A`OZb{13JDg7q#TG=Vb74} z|4J9d=K`|~rP`qbF-qkS&KNDGv%Ym={>7Q`l60&RXMrN6-5PDgv?agRa2aDzE7PJ? 
z!-keK0_7yMYdofN&ntD9Qa1cAhJqI;O_#v1)DLi`O>9ApiM=crR;2++;t2{VMhmQU zAjCoGu{ubM0cT*JetD zNR7a;s5}EMEq%n{Ey;*v*rByO5?a#B_Qa`HWV*LPi)Cp-osn#+pr<60MNslf!PWP< z@_AsXG9$KpruxtDoG!Fs6BjI9!k8gNn~79xA6KM)92%f=QgPupFO;mDTds%a%BUM{ zy?jed7k@1@4jKp zrIPikZ0aAXr8-VZ99qBK5hGM7T2f`u>Ykv=-eEMA043|hbA$I%Yn1T}Cq;^xm%Ft~ zG1% z66CNAC?yMAgJWN;1k)ld&1p)fOW?W0L$NKv5Xl6;9(B!Fl585_1&P%xvp@q4G;o0S z%sE@*L{-?Y%B!{UM{l5f-H`?G)A1Y0kge-a%rM8+Ey5%3;v$f0U-+}$Pq3z;H9xMdR0)Q%DTYT< zEraO$R%xxST}l0_Djh28PrsEc=Hfy&^r%@^q_PaZxB#h#U0p?k!NrAcO2LJztBVT_ ztZG3Q^_qXC9LqKg|L5SpqX|BuO!y_rMl}tkjjrM0bLAj?_x{zv=cD(>AHIL|L~_Dd zq%g$-f}k|$KqAsIj^OGF0t&xynn@(T_b)=Je1*!n_;L6t%nTA&zIC#23CX$#ed8Op zei9!-xVqYGV0L)az)M2qx5ADHJXeTufkeSWlq7N_uzF{HT=@!tct*9QIv6}MXhe@n zO7#%^4MoX6P%1TCwuA7at5a)$$6}8q+Juzv>wKsvnWWv@xWrtG#Qm^n+bv z5gxsN_4(c2+k*zRr1Eghsh)z5@;t2TGP2_L9h9u7_^e%tWL!vUecx80<1Q{EHRx+~@R21^C(o&rV2Mk~foj_t&hYv_6HbI3<O^rF`X4P$zqzNS|9zg} zG{=mGBEPFpur>PM?(T4_q5tiSww^!efA{fRTnwIi2j{u+R5dntl%FZ|o(>F`PVj_e zO+FY@AC>bf)S<+R~5<+$^4|4OvpL-f#1;1JcZo8-Zw&J3sRCT z;nxDCI<5ufxk5Yi{ugW5s=Sab$S!ag;}}V<#OWNvk0rm>ot+SzCLEA5o99Fr_xX%) zcw&4Kuil-=vQhz%40AK|4ky4g+tNTv!KGsW^v;$j!-NVh@#IoZDbmzn+eg<*Qd`kwQKC{`%ut;aCzu18Issjc4uU4Ux+k$;1TwzwzL2+;70r zjb49tb^qCzb9#nQ22!4}86w694s(?EkDx5LZ(~^A)rQjPf-L1rg{xOl+gyd5Gt=ba zHG$M}mZ{lhrmilfw#WPNRfRfnRAhE%fd1PAxWFYaoTpM``~T&G|JlF!>ZbEK%}gvo zFM|5B#&CwydB|r2g>=-V>YDC^y1~|GZhWn84A9V-!lr+FWekt}*4FaMsCB8ow5nXf zuLTvj(cd-{6ZIF2mr+d-Ix(ABUG%b!;&l{jb~5P1KyQVu)Nwm%SsgcOsc)<1Yg4*( zhYqahkQ)~9u8%*~i-jiWpdq7HuelyjncF{yDOsR)L$PJBbOPvMJjYlREVIF<)?;oZ z$Qa50(#&qiB{rb5muj6Mt|#SQctyf;D_8RVRkmYO-d7cEx7%&j!(Yqw5jYeG-06}K zH68vzgp|6Tbu~|km=$9c2|m`JG!x*1aZ1O7Ig%D*^+|CSSF;s)YUm~M)boyUmS84@ zvU&?*hD0U9Y`xI?iL0POca><^VFi#WSUx;~gv0`T9feeqM4DTnprM#y{YrvR!6!aE zQe3_@fW1uw#tmX7zO;r9Irv;Kl1+UGb!V)v#1s^HYQKc0WpLow`P{7X#MHS2LH~v3 z>1KEB`eZgk*_w-0fcsk=DtLs%thR`sJbCiOl1x|HtOq}ChGiW6W@rKqHyyxDZKyMV zvIJovH~_TAu*twBbtIZjIDLP=2TwhPl7ZT^l9g z4l3Z)gfi~uQ@nfzkMultywDClMo*~Jo(QDgBWqTo;`)o;wB@ew&Biow|Ndj`zn1;4 z*NRkc@9Z$x8vB2=HQa96|IfEZ5BC3kJPq}!i(!_gq5k;8H5G+QwWvdrwxwDwB_H6whe_bi3l#s!dg{m61D;P!DsvL@wE7VCF<`?0IWIxyR-A6dH#QAcjw{! 
z@4Y-OGeddK2PH?V!f?7-+UgC)Z;`#pAA_gxVTKu2d&}ksr8*;7=VqLmBTUL@6siW{ z%E2sVQZ-gOYuSw_Uv1fJy1^CofPD{Yr-wsA2RdTj*>t_NAyj%dO4G*@uF)bj&bTK$ zPlhQ4d0wb9`*L%Dkb!5L#Y(g_Z& z)0>qMpe|9JW9+Rc69DUi(*vZ!$=^6jqI*%aos-p^-PLd{uQqU-gDfM|RzUYF+SR?i`)9=kez#uMw~TQO&vbpcZz9?=C$k*KOX$6_SvM5_<_-R z)c62qU+71?JjRpuS4TOog8Qz!5fnD@)XyniM$oFfr%2FIoaXw(U3r*q&CIB2VNdWp z7pn+sM|N`!O?6_AS;$hUw6$| zoGsduaYeWH-?m4mOz6=$Z!1xCEpRsV|jyvB_-;5EmKU;!KFVflMR4 zgo9H(r${L4wpVe^sGu=TBlxg?)ZM1ay>aVSbw-aHwA;O9Z;rCp?XEVm#!|d3wl=-V z4bg24VU5X@slPX8xwJ&ztn@JYjZm0Z_R?^eH$ATK9c3?5O0lIIOo(o{YOS%_M|n zFVPx)j5{XVP3hJqTiO2`)6Jf@U8|0k$C&#kZm%ZY*f^50=z?8=q~uj~i<>*X%ajh0$08-(xo3R-w3Pp7(My1UdeJMpB}?=v$+$ z_4aTd^6Tbk2Qi(lEo|npTTaMpPDUs5nLA3?ce?kEYX6M9o%f7~2$>uZghUTOES#Y&TbJ z4HjFqxa$65V=T~?8$F^otN%$E`AupG=@Cu#ObyKbEIZeBKS%ZYgGxJ7dNsOs_~rZ7 zF8|FGjar@@hbg-=$SGqgxw&-uDph4nB{x&{iut-aE03jGU=r;e8|%ifW^3$J?A#;> z&5QDlZot*dG$^M%cXMEY8Jc3}V{ezQhj~Met);|@#6!0vhEAn)U-Y;J{vA=+|3Yi!;hj zWFXj%I`w3uYg5KHEG<*ZNg|}5K;hbq4(qf7-JGzpQuVucoXQ_6(dfE9 z)R41@Z)Hm=MNtbyo847EjWoD@Ns_95S3!SO+N0{JQs&wcAKaWe<}sN>o1SwgcWwRY zTZZ+=W*u$SHd#j%Mv^I1Uh9&7ta3~~i2tvCTJk?n#`kRhkG6K2_dh+~+S+}P|M&5< zBPpBtJ(Z*mf}kd?a}`xpiRhzty+4=jy`GNJ8s4+H$4VQzd{g(bUZZ-0`K@alo&S`f zM)S3I>k(*yxJkJ4t%98&xccw?wD`YPg6>oVtl|H|-4{De{{Ld<;r`Eic^a&vH`=jj zu9ci$$3k^8`&&mLese;=cD*N3Qcu|x?Rq#8wU_@zz_vSTq;zNTfBCRih zsy2K;IzgmeVU3MQ)1Wu#?W;oGRCrtM+@1=3>%rI!L0y4!RqRGVr2Sp|{mv(L z_Ol+n+TT&P+zKuGrb@M%l{K-^$Nx=Mt~(I}YxTd~-3I^P-5EaUfA{iqFQZ2Kv=z#q zTl{+RV#R^J{~*a>DR_qh5dU*8Ph-{8ujIO? zZ9r9UM!B$%t4v(s-+230Gv7@**p)rp^Ypb5jTZHsc2t9&{YP~_ncjgMV}TmX?m$9W zzZV&}jhaH{fYtTD$J5lF{D9=L+zI(^z8|RpMbbbegyWBi%Z`9SAZtT{zw-IaI zu(z5=@b=*3pPvu@--mo8t^((BLRO@>HM<{OXL`HC6LnCV7h{g=gj}=4 znn@bgEg8YkWI|$dd|Xu$RqZ)Va0DlZZ;##_G!nJ`2kkoc$^W@Ar0zlgAB|f1|95vD z^8eq@b8R!)%r*oMe(WGO6`fE4;g2uG*zr{7%;d1cpoij5^ zPyGmFH%slWnJvy5s~HwC5=P518PUcCUykXyd1>xbjo{%U>L--G?P^}Kce@9xg_&cph@ zkLOR}|Ly73jXl6WYd9;zBN{%YuXX!=BP{bi&#;vK+yMNiQp8M=z-MS_-u08&lxz9S zdnZbjP|F_8SYJMzBa*e_4~z&k=|7D27pR}y<{(nmdE}gADHy3Ao5&fSkn;%qfq~$! zDvi%kEM#F@N;T&;;5c#{8I-J3wpy3X^CTT-I;?y9<#5e`pf0+hBq2I^U~Q&0k`k?} zxoQIS?WX>jof$^{hf>2K(~L5F_ie1x|D~|6%m1wx5Ay$ho((ucLSUAu8=&+IfU_CS zU|f(iA=wmi6rZ9g=ApL%>P{+NFXx*5XzEL{T4R85HzxFfVndA=lv5%2;*Kj zC1RldtI_wu@o#LP{%c>%rUUsO`;#xSK?P`x;?p9B2}v>co`(D^_nwAhbm~0~#a#YP z8JT)d|G&2ZKO;tH!Qt@L0r$e3(O+;ZypSXq4RmovfAPWvk7;L#*@73Fb zaGv}gZLD7ZqwVeO=KUXA!-x3)`*}7@Ml#R)sQ>u%q~p3pl+!e&M&}rq3oPKc-~wLD zmvBrAA$bKWGC|ChxqGS-R8j4o_t%f5wmk~Bx5Hu88ktW+K2vuwOT~MPk|`dT(t$1< zkZeNP9BEnqKU^$R{QV?FBH+Ikh~Z86>qmXW)wy|oO$)Mo;1s`38&yW~2o~4sM^z(O zT(d8px3K|73^V*|K{yeZd)`y`^6JGX9DVm+s@Q8R;27r=4zmeW*X_kL<21$UvRufE zGzA(HIi|CiNwn$%DoX|CDos7O5ZJ5v%s{D&_+(p$85bx`K_zGRyr)kO=TaD;Ohiwg z0+lRWg4>2lP~fDEQ;|;@N-zWgF#tavAG|r(J2_An{eSveeB}$JF~WRl{?ZxDRX;wT zU<@B?!M>lKY}7AgfiX_$S@>D?dcT&h&r{X+)fI;1H+vJJq6qv0PoxMnWFpzLni|%N zl+c(DNG2E^6r5z!&+@}yBd*{DVup1GgRz2VsYJ5rW(XfBaEw7xt;BhpVv~KFYl)g* zfrzw0q=|X5!I;8pZBpf(DH=hAzazV=q95*h&F}^DvwTG6^=$XBg z@@Au0PBN|Rd)_7JQ!whN`X#(F5!UiA9y5~bo5j4#KtB4v_Mh5c>X&y}Chmhvpkpb! z;S#=FTz!E{0QJ2i{naImDNSX;iL^U9%epy>E`ekU?r1pdF6?AQstR`&U6UB7to^cT zsXIHfQ}X3YSyo@rnNx0OuPLZ+HL^R}oe-r_+-Ui50!vx|wRgfxfXcl?GWgONS}zSa z%uEpd7g-^LIXcC_3#RToR*ItnWCHCKJay4h2U0ezbBWh?!|#Tj{j?<&IHl;aBFp4g z1oh|#CB)1nrmD^6-5i$+HQCz7PqMAO3=m^z33Q^%I=*40T^pwhQA^4QUdU+z2j;5O zW)qR)JBVq{XpA|h%ylWFc#a3ow`dd&O9Nd20Goo{&5-?kI2=`-vKi}}J!+NMe+!qO zuNs!6e800)dCzlyN#l}gufsicFK$Ue?dwvv)8=`)v=`D=xrud%y1DP3aC6~(ouDS- z!3KzZrWyWh95U4llgZSU5ofesM!qm*%3joR?)DSUCPeKlVcn+h?c(VnTkA@bTDil? 
zJ1UMC<~S40I?k;twL0BwSx?I*41G0AKVt5EL7m;~atQWt;dX|ZzLp!=iuzb}Eg#Fr z;U}Aa#Q~~?ty%5H&D>vK(MpbN19o@a%9W`6ZYy?WO+hJ+EjKZoO=5d-;6ke^QvcN%A}A#EK)b6$LYr8fm2T zDq9IAWvyqbYig>)-q9hPV#QKOf-^xTOWSEd;Y1V(p*?N4F0{WH4h6?xL6`&7a5f|H z47i}oTmxQ5QtE#gC35H#$I}8U!)HJ<9LnZ67JyQkP2o!)vA#T0LQc0Qb;Nc%XCzHw zj7zp!7{N(6I3MT!6EnO{?g{pg~8k~X^Vwc_hX{xDSk(|1Q)J@cz5p`K#0 zP<6d_bWz@{9bNcU6sr7XM5iAqiTSJzOdV!@=1Ym?N7XH$SMt$w27{g6XJ=<&O_@;6 zBQ?#9pRv~GfwUJE1fLByq`_%#iJXvBJCNi{IY*0F6h`fD-$BBA$xS-9);-|F@oCoU^ZMz8|+70y=ZMm>?-eJWDt5vkt7t$RD0;!hP@ zou9p=_4csNL!D}p{l^$`RdFR7jcFF+T&O>cdQ$xvl1#W$yjZz>-E*}T9QozydWy%YG8o|8ff4ET_Hz$ zA|$#efEV28@%9Qqr#O=Ylp>$exv|o00EJNsJ}Qrp?4ojRj$F5@craV>r#6)_VmDM#7RNJe{m2%upnja_?B?tPWIpo9h@H{*Z&wu9ge*pjh|Nn>r JuG;|0007L=XwCot literal 0 HcmV?d00001 diff --git a/deploy/router/environments/dev.yaml b/deploy/router/environments/dev.yaml new file mode 100644 index 0000000..79519d8 --- /dev/null +++ b/deploy/router/environments/dev.yaml @@ -0,0 +1,5 @@ +router: + resources: + requests: + cpu: 100m + memory: 256Mi diff --git a/deploy/router/environments/prod.yaml b/deploy/router/environments/prod.yaml new file mode 100644 index 0000000..028082c --- /dev/null +++ b/deploy/router/environments/prod.yaml @@ -0,0 +1,58 @@ +router: + router: + configuration: + # duplicated from ../values.yaml + health_check: + listen: 0.0.0.0:8080 + sandbox: + enabled: true + homepage: + enabled: false + supergraph: + introspection: true + include_subgraph_errors: + all: true + plugins: + experimental.expose_query_plan: true + cors: + allow_any_origin: true + persisted_queries: + enabled: true + log_unknown: true + safelist: + enabled: false + require_id: false + rhai: + scripts: /dist/rhai + main: main.rhai + authentication: + router: + jwt: + jwks: + - url: http://graphql.users.svc.cluster.local:4001/.well-known/jwks.json + authorization: + preview_directives: + enabled: true + # end duplication + telemetry: + apollo: + field_level_instrumentation_sampler: 0.5 + exporters: + tracing: + otlp: + endpoint: http://collector.monitoring:4317 + protocol: grpc + enabled: true + common: + sampler: 0.5 + service_name: "router" + service_namespace: "router" + metrics: + otlp: + endpoint: http://collector.monitoring:4317 + protocol: grpc + enabled: true + resources: + requests: + cpu: 500m + memory: 512Mi diff --git a/deploy/router/rhai/client_id.rhai b/deploy/router/rhai/client_id.rhai new file mode 100644 index 0000000..a73b339 --- /dev/null +++ b/deploy/router/rhai/client_id.rhai @@ -0,0 +1,32 @@ +fn process_request(request) { + log_info("processing request"); + let valid_client_names = ["apollo-client", "retail-website"]; + + if ("apollographql-client-version" in request.headers && "apollographql-client-name" in request.headers) { + let client_header = request.headers["apollographql-client-version"]; + let name_header = request.headers["apollographql-client-name"]; + + if !valid_client_names.contains(name_header) { + log_error("Invalid client name provided"); + throw #{ + status: 401, + message: "Invalid client name provided" + }; + } + + if client_header == "" { + log_error("No client version provided"); + throw #{ + status: 401, + message: "No client version provided" + }; + } + } + else { + log_error("No client headers set. Please provide headers: apollographql-client-name and apollographql-client-version"); + throw #{ + status: 401, + message: "No client headers set. 
Please provide headers: apollographql-client-name and apollographql-client-version" + }; + } +} \ No newline at end of file diff --git a/deploy/router/rhai/main.rhai b/deploy/router/rhai/main.rhai new file mode 100644 index 0000000..7bf0f45 --- /dev/null +++ b/deploy/router/rhai/main.rhai @@ -0,0 +1,12 @@ +import "client_id" as client_id; + +fn process_request(request) { + client_id::process_request(request) +} + +fn supergraph_service(service) { + // Rhai convention for creating a function pointer + const request_callback = Fn("process_request"); + + service.map_request(request_callback); +} diff --git a/deploy/router/templates/backendconfig.yaml b/deploy/router/templates/backendconfig.yaml new file mode 100644 index 0000000..d4bc091 --- /dev/null +++ b/deploy/router/templates/backendconfig.yaml @@ -0,0 +1,12 @@ +{{- if and .Values.ingress.enabled .Values.ingress.gcp -}} +apiVersion: cloud.google.com/v1 +kind: BackendConfig +metadata: + name: http-hc-config +spec: + healthCheck: + checkIntervalSec: 15 + port: {{ splitList ":" ((index .Values.router.router.configuration "health_check").listen | default ":8088") | last }} + type: HTTP + requestPath: /health +{{- end }} diff --git a/deploy/router/templates/ingress.yaml b/deploy/router/templates/ingress.yaml new file mode 100644 index 0000000..7593a85 --- /dev/null +++ b/deploy/router/templates/ingress.yaml @@ -0,0 +1,17 @@ +{{- if .Values.ingress.enabled -}} +{{- $svcPort := .Values.router.service.port -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.ingress.name }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + defaultBackend: + service: + name: {{ .Values.ingress.name }} + port: + number: {{ $svcPort }} +{{- end }} diff --git a/deploy/router/templates/rhai-config.yaml b/deploy/router/templates/rhai-config.yaml new file mode 100644 index 0000000..848adc3 --- /dev/null +++ b/deploy/router/templates/rhai-config.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: rhai-config + labels: + {{- include "router.labels" . | nindent 4 }} +data: + {{- (.Files.Glob "rhai/*").AsConfig | nindent 2 }} \ No newline at end of file diff --git a/deploy/router/values.yaml b/deploy/router/values.yaml new file mode 100644 index 0000000..5861bd2 --- /dev/null +++ b/deploy/router/values.yaml @@ -0,0 +1,65 @@ +# Default values for router. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +router: + router: + configuration: + # duplicated in environments/prod.yaml + health_check: + listen: 0.0.0.0:8080 + sandbox: + enabled: true + homepage: + enabled: false + supergraph: + introspection: true + include_subgraph_errors: + all: true + plugins: + experimental.expose_query_plan: true + rhai: + scripts: /dist/rhai + main: main.rhai + authentication: + router: + jwt: + jwks: + - url: http://graphql.users.svc.cluster.local:4001/.well-known/jwks.json + authorization: + preview_directives: + enabled: true + cors: + allow_any_origin: true + coprocessor: + url: http://coprocessor.coprocessor.svc.cluster.local:8081 + timeout: 2s + router: + request: + headers: true # These boolean properties indicate which request data to include in the coprocessor request. All are optional and false by default. 
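        # The subgraph.all block below likewise forwards request and response headers
        # from every subgraph call to the coprocessor.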
+ subgraph: + all: + request: + headers: true + response: + headers: true + + extraVolumeMounts: + - name: rhai-volume + mountPath: /dist/rhai + readOnly: true + extraVolumes: + - name: rhai-volume + configMap: + # Provide the name of the ConfigMap containing the files you want + # to add to the container + name: rhai-config + ngress: + enabled: false + service: + annotations: + cloud.google.com/backend-config: '{"default": "http-hc-config"}' +ingress: + enabled: true + gcp: true + name: router diff --git a/subgraphs/discovery/deploy/.helmignore b/deploy/subgraph/.helmignore similarity index 100% rename from subgraphs/discovery/deploy/.helmignore rename to deploy/subgraph/.helmignore diff --git a/subgraphs/checkout/deploy/Chart.yaml b/deploy/subgraph/Chart.yaml similarity index 100% rename from subgraphs/checkout/deploy/Chart.yaml rename to deploy/subgraph/Chart.yaml diff --git a/subgraphs/checkout/deploy/environments/dev.yaml b/deploy/subgraph/environments/dev.yaml similarity index 100% rename from subgraphs/checkout/deploy/environments/dev.yaml rename to deploy/subgraph/environments/dev.yaml diff --git a/subgraphs/checkout/deploy/environments/prod.yaml b/deploy/subgraph/environments/prod.yaml similarity index 100% rename from subgraphs/checkout/deploy/environments/prod.yaml rename to deploy/subgraph/environments/prod.yaml diff --git a/subgraphs/checkout/deploy/templates/NOTES.txt b/deploy/subgraph/templates/NOTES.txt similarity index 100% rename from subgraphs/checkout/deploy/templates/NOTES.txt rename to deploy/subgraph/templates/NOTES.txt diff --git a/subgraphs/checkout/deploy/templates/_helpers.tpl b/deploy/subgraph/templates/_helpers.tpl similarity index 100% rename from subgraphs/checkout/deploy/templates/_helpers.tpl rename to deploy/subgraph/templates/_helpers.tpl diff --git a/subgraphs/checkout/deploy/templates/deployment.yaml b/deploy/subgraph/templates/deployment.yaml similarity index 100% rename from subgraphs/checkout/deploy/templates/deployment.yaml rename to deploy/subgraph/templates/deployment.yaml diff --git a/subgraphs/checkout/deploy/templates/hpa.yaml b/deploy/subgraph/templates/hpa.yaml similarity index 100% rename from subgraphs/checkout/deploy/templates/hpa.yaml rename to deploy/subgraph/templates/hpa.yaml diff --git a/subgraphs/checkout/deploy/templates/ingress.yaml b/deploy/subgraph/templates/ingress.yaml similarity index 100% rename from subgraphs/checkout/deploy/templates/ingress.yaml rename to deploy/subgraph/templates/ingress.yaml diff --git a/subgraphs/checkout/deploy/templates/service.yaml b/deploy/subgraph/templates/service.yaml similarity index 100% rename from subgraphs/checkout/deploy/templates/service.yaml rename to deploy/subgraph/templates/service.yaml diff --git a/subgraphs/checkout/deploy/templates/serviceaccount.yaml b/deploy/subgraph/templates/serviceaccount.yaml similarity index 100% rename from subgraphs/checkout/deploy/templates/serviceaccount.yaml rename to deploy/subgraph/templates/serviceaccount.yaml diff --git a/subgraphs/checkout/deploy/templates/tests/test-connection.yaml b/deploy/subgraph/templates/tests/test-connection.yaml similarity index 100% rename from subgraphs/checkout/deploy/templates/tests/test-connection.yaml rename to deploy/subgraph/templates/tests/test-connection.yaml diff --git a/subgraphs/discovery/deploy/values.yaml b/deploy/subgraph/values.yaml similarity index 89% rename from subgraphs/discovery/deploy/values.yaml rename to deploy/subgraph/values.yaml index cd0958c..352bf7a 100644 --- 
a/subgraphs/discovery/deploy/values.yaml +++ b/deploy/subgraph/values.yaml @@ -1,7 +1,7 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout + repository: ghcr.io/apollosolutions/reference-architecture/checkout pullPolicy: Always tag: main diff --git a/docs/cleanup.md b/docs/cleanup.md index e254fa8..7898c89 100644 --- a/docs/cleanup.md +++ b/docs/cleanup.md @@ -6,107 +6,12 @@ Running Google Cloud or AWS resources will continue to incur costs on your accou ### Automated cleanup -### Delete Operator-Managed Resources - -Before deleting Kubernetes resources, first remove the operator-managed CRDs. **The following steps are provided for both dev and prod clusters:** - -```sh -# Start with dev cluster -kubectx apollo-supergraph-k8s-dev - -# Delete Supergraph resources (this deletes the router deployment) -kubectl delete supergraphs reference-architecture-dev -n apollo - -# Delete SupergraphSchema resources -kubectl delete supergraphschemas reference-architecture-dev -n apollo - -# Delete Ingress resources for the router -kubectl delete ingress router -n apollo || true - -# Delete Subgraph resources (this will also stop schema publishing) -kubectl delete subgraph --all --all-namespaces - -# Uninstall Helm releases -helm uninstall coprocessor -n apollo -helm uninstall client -n client - -# Uninstall subgraph Helm releases before deleting namespaces -for subgraph in checkout discovery inventory orders products reviews shipping users; do - helm uninstall $subgraph -n $subgraph || true -done - -# Delete subgraph namespaces (each subgraph has its own namespace) -kubectl delete namespace checkout discovery inventory orders products reviews shipping users - -# Delete client namespace -kubectl delete namespace client - -# Delete operator API key secret (contains sensitive data) -# Note: Helm release secrets (sh.helm.release.v1.*) are automatically cleaned up by helm uninstall -kubectl delete secret apollo-api-key -n apollo-operator || true - -# Uninstall the Apollo GraphOS Operator -# This will also automatically clean up Helm release secrets (sh.helm.release.v1.*) -helm uninstall apollo-operator -n apollo-operator - -# Delete operator namespaces -kubectl delete namespace apollo-operator apollo - -# Repeat for prod cluster -kubectx apollo-supergraph-k8s-prod - -kubectl delete supergraphs reference-architecture-prod -n apollo -kubectl delete supergraphschemas reference-architecture-prod -n apollo -kubectl delete ingress router -n apollo || true -kubectl delete subgraph --all --all-namespaces -helm uninstall coprocessor -n apollo -helm uninstall client -n client - -# Uninstall subgraph Helm releases before deleting namespaces -for subgraph in checkout discovery inventory orders products reviews shipping users; do - helm uninstall $subgraph -n $subgraph || true -done - -kubectl delete namespace checkout discovery inventory orders products reviews shipping users -kubectl delete namespace client - -# Delete operator API key secret (contains sensitive data) -# Note: Helm release secrets (sh.helm.release.v1.*) are automatically cleaned up by helm uninstall -kubectl delete secret apollo-api-key -n apollo-operator || true - -# Uninstall the Apollo GraphOS Operator -# This will also automatically clean up Helm release secrets (sh.helm.release.v1.*) -helm uninstall apollo-operator -n apollo-operator -kubectl delete namespace apollo-operator apollo -``` - ### Cloud-specific steps There are a few cloud-specific steps you'll need to take. 
#### GCP -**Clean up GCP Workload Identity bindings** (created during setup for monitoring): - -```sh -# You'll need your PROJECT_ID and CLUSTER_PREFIX (default: apollo-supergraph-k8s) -CLUSTER_PREFIX=${CLUSTER_PREFIX:-"apollo-supergraph-k8s"} -PROJECT_ID="" - -# Remove workload identity binding (shared across dev and prod clusters) -# Note: This only needs to be run once, not per cluster -gcloud iam service-accounts remove-iam-policy-binding \ - "${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${PROJECT_ID}.svc.id.goog[monitoring/metrics-writer]" || true -``` - -**Note:** The GCP IAM service account `${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com` may be created by Terraform. If it's not removed by `terraform destroy`, you can delete it manually: - -```sh -gcloud iam service-accounts delete "${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" || true -``` - In order to delete some non-Kubernetes resources created by Google Cloud, it's easiest to just delete everything: ```sh @@ -123,7 +28,14 @@ kubectl delete daemonsets,replicasets,services,deployments,pods,rc,ingress --all #### AWS -In order to ensure the load balancers are properly removed, and the IAM service roles are removed, run the following, replacing `apollo-supergraph-k8s` with the appropriate cluster prefix if modified: +In order to ensure the load balancers are properly removed, and the IAM service roles are removed, please run: + +```sh +gh workflow run "Uninstall Router" --repo $GITHUB_ORG/reference-architecture +open https://github.com/$GITHUB_ORG/reference-architecture/actions/workflows/uninstall-router.yaml +``` + +Wait for the action to complete on the opened screen, and once finished, run the following, replacing `apollo-supergraph-k8s` with the appropriate cluster prefix if modified: ```sh # dev @@ -138,33 +50,6 @@ eksctl delete iamserviceaccount \ aws cloudformation delete-stack --stack-name eksctl-apollo-supergraph-k8s-prod-addon-iamserviceaccount-kube-system-aws-load-balancer-controller ``` -### Delete Monitoring Resources - -The monitoring namespace may contain additional resources (InfluxDB, Grafana, Zipkin, etc.) that should be cleaned up. **Repeat these steps for both dev and prod clusters:** - -```sh -# Start with dev cluster -kubectx apollo-supergraph-k8s-dev - -# Uninstall monitoring components (if deployed) -helm uninstall influxdb -n monitoring || true -helm uninstall grafana -n monitoring || true -helm uninstall otel-collector -n monitoring || true -helm uninstall zipkin -n zipkin || true - -# Delete monitoring namespaces -kubectl delete namespace monitoring zipkin || true - -# Repeat for prod cluster -kubectx apollo-supergraph-k8s-prod - -helm uninstall influxdb -n monitoring || true -helm uninstall grafana -n monitoring || true -helm uninstall otel-collector -n monitoring || true -helm uninstall zipkin -n zipkin || true -kubectl delete namespace monitoring zipkin || true -``` - ### Remaining steps Then you can destroy all the provisioned resources (Kubernetes clusters, GitHub repositories) with terraform: diff --git a/docs/operator-guide.md b/docs/operator-guide.md deleted file mode 100644 index 14a1896..0000000 --- a/docs/operator-guide.md +++ /dev/null @@ -1,239 +0,0 @@ -# Apollo GraphOS Operator Guide - -This document explains how the Apollo GraphOS Operator is used in this reference architecture. 
- -## Overview - -The Apollo GraphOS Operator automates the management of GraphQL subgraphs and supergraphs in Kubernetes. It handles: -- Schema publishing to Apollo GraphOS -- Automatic composition of subgraphs into a supergraph -- Deployment of the Apollo Router -- Schema change detection and re-composition - -## How It Works - -### Architecture Components - -The operator manages three types of Kubernetes resources: - -1. **Subgraph** - Defines a GraphQL subgraph with its schema location and endpoint -2. **SupergraphSchema** - Selects Subgraphs and composes them into a supergraph schema -3. **Supergraph** - Deploys the composed schema as a running Apollo Router - -### Schema Publishing Flow - -```mermaid -graph TB - S1[Subgraph CRD Deployed] - S2[Operator Extracts Schema] - S3[Schema Published to GraphOS] - S4[GraphOS Composes Supergraph] - S5[Supergraph CRD Fetches Composed Schema] - S6[Operator Deploys Router] - - S1 --> S2 - S2 --> S3 - S3 --> S4 - S4 --> S5 - S5 --> S6 -``` - -When a Subgraph CRD is deployed: -1. The operator extracts the schema from the container image (`/app/schema.graphql`) -2. Publishes the schema to Apollo GraphOS Studio -3. SupergraphSchema triggers composition of all matching subgraphs -4. The composed schema is available in GraphOS Studio -5. The Supergraph CRD fetches the composed schema and deploys the router - -## Namespace Organization - -### Dev Environment (`apollo-supergraph-k8s-dev` cluster) - -- **apollo-operator**: Operator installation and controller -- **apollo**: SupergraphSchema and Supergraph resources, router deployment -- **checkout, discovery, inventory, orders, products, reviews, shipping, users**: Individual subgraph services - -### Prod Environment (`apollo-supergraph-k8s-prod` cluster) - -Same structure as dev, but with production configurations (more replicas, telemetry, persisted queries). - -## Label Strategy - -### Subgraph Labels - -Each Subgraph CRD has two labels: - -- `app: {subgraph-name}` - Unique identifier for each subgraph -- `apollo.io/subgraph: "true"` - Common label used for composition selection - -### SupergraphSchema Selection - -The SupergraphSchema uses `matchExpressions` to select all subgraphs: - -```yaml -spec: - selectors: - - matchExpressions: - - key: apollo.io/subgraph - operator: Exists -``` - -This matches any Subgraph CRD with the `apollo.io/subgraph` label, regardless of value. - -## Monitoring Resources - -### Check Subgraph Status - -```bash -# List all subgraphs -kubectl get subgraph --all-namespaces - -# Describe a specific subgraph -kubectl describe subgraph checkout -n checkout - -# Watch subgraph status -kubectl get subgraph -w -``` - -Look for `SchemaLoaded` condition in the status to verify schema extraction. 
- -### Check Composition Status - -```bash -# Get SupergraphSchema status -kubectl get supergraphschema -n apollo - -# Describe for detailed status -kubectl describe supergraphschema reference-architecture-dev -n apollo -``` - -Status conditions: -- **SubgraphsDetected**: Lists all matching subgraphs and their schema hashes -- **CompositionPending**: Shows composition request state -- **Available**: Indicates successful composition with launch ID - -### Check Router Deployment - -```bash -# Get supergraph status -kubectl get supergraph -n apollo - -# Describe for detailed status -kubectl describe supergraph reference-architecture-dev -n apollo -``` - -Status conditions: -- **SchemaLoaded**: Latest supergraph schema loaded -- **Progressing**: Deployment in progress -- **Ready**: Router replicas are running and ready - -## Automatic Schema Changes - -When you update a subgraph schema and redeploy the image: - -1. Build and push new image with updated schema -2. Update the Subgraph CRD to reference the new image -3. Operator detects the change and extracts the new schema -4. Schema is published to GraphOS Studio -5. SupergraphSchema triggers re-composition -6. Supergraph fetches the new composed schema -7. Router is rolled out with the new schema - -### Manual Trigger - -If you need to manually trigger composition: - -```bash -# Edit the SupergraphSchema -kubectl edit supergraphschema reference-architecture-dev -n apollo - -# Temporarily disable composition -# Set: compositionEnabled: false - -# Save and exit, then re-enable -# Set: compositionEnabled: true (or remove the field) -``` - -## Troubleshooting - -### Subgraph Not Publishing - -**Symptoms**: Schema doesn't appear in GraphOS Studio - -**Check**: -```bash -kubectl describe subgraph -n -``` - -Look for errors in: -- Schema extraction from image -- API key authentication -- Network connectivity to GraphOS - -### Composition Failing - -**Symptoms**: No composed schema available - -**Check**: -```bash -kubectl describe supergraphschema -n apollo -``` - -Common issues: -- Schema conflicts between subgraphs -- Invalid graph ID or variant -- Missing subgraphs (if not using `partial: true`) - -### Router Not Deploying - -**Symptoms**: No router pods running - -**Check**: -```bash -kubectl describe supergraph -n apollo -``` - -Look for: -- Schema loading errors -- Image pull errors -- Resource constraints -- Pod scheduling issues - -### Viewing Router Logs - -```bash -# Get router pods -kubectl get pods -n apollo - -# View logs -kubectl logs -n apollo deployment/reference-architecture-{dev|prod} -``` - -## Updating Router Configuration - -To update router configuration without changing subgraphs: - -```bash -# Edit the Supergraph CRD -kubectl edit supergraph reference-architecture-dev -n apollo - -# Update spec.podTemplate.router.configuration -# Save and the operator will roll out the changes -``` - -Changes are applied via rolling update - the operator manages the rollout. - -## Best Practices - -1. **Version Schemas**: Keep track of schema versions in your container images -2. **Monitor Compositions**: Set up alerts on composition failures -3. **Test Locally**: Use local development with the operator before promoting to prod -4. **Gradual Rollout**: Update schemas incrementally and monitor composition -5. 
**Backup Configs**: Keep Supergraph and SupergraphSchema YAML files in version control - -## Additional Resources - -- [Apollo GraphOS Operator Documentation](https://www.apollographql.com/docs/apollo-operator) -- [Operator API Reference](https://www.apollographql.com/docs/apollo-operator/resources) -- [GraphOS Studio](https://studio.apollographql.com) - diff --git a/docs/setup.md b/docs/setup.md index f115d5f..297937b 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -52,17 +52,17 @@ git pull - [kubectx](https://github.com/ahmetb/kubectx#installation) - [Github CLI](https://cli.github.com/) - [jq](https://stedolan.github.io/jq/download/) -- [Rover CLI](https://www.apollographql.com/docs/rover/getting-started/) (for creating operator API keys) -- [Helm](https://helm.sh/docs/intro/install/) (for installing the operator) #### GCP - [GCloud CLI](https://cloud.google.com/sdk/docs/install) +- Optional: [Helm](https://helm.sh/docs/intro/install/) #### AWS - [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) - [eksctl](https://eksctl.io/introduction/#installation) +- Optional: [Helm](https://helm.sh/docs/intro/install/) -**Note**: The `create_graph.sh` script automatically creates an Operator API key using the Platform API. This key is exported as `OPERATOR_KEY` in your `.env` file and will be used by `setup_clusters.sh` to configure the operator secret. - After this completes, you're ready to deploy your subgraphs! ## Part C: Deploy applications @@ -280,32 +272,23 @@ After this completes, you're ready to deploy your subgraphs! ### Deploy subgraphs to dev -Deploy the subgraph services and register them with the operator: - ```sh -kubectx apollo-supergraph-k8s-dev - -# Deploy each subgraph service -for subgraph in checkout discovery inventory orders products reviews shipping users; do - kubectl create namespace $subgraph --dry-run=client -o yaml | kubectl apply -f - - helm install $subgraph subgraphs/$subgraph/deploy -f subgraphs/$subgraph/deploy/environments/dev.yaml -n $subgraph - kubectl apply -f subgraphs/$subgraph/k8s/subgraph-dev.yaml -done +gh workflow run "Merge to Main" --repo $GITHUB_ORG/reference-architecture +# this deploys a dependency for prod, see note below +gh workflow run "Deploy Open Telemetry Collector" --repo $GITHUB_ORG/reference-architecture ``` -The operator will automatically publish schemas to GraphOS and trigger composition. You can monitor the progress: +
+ Note about "initial commit" errors -```sh -# Check if subgraphs are registered -kubectl get subgraphs --all-namespaces +When terraform creates the repositories, they immediately kick off initial workflow runs. But as the secrets needed are not available at that point, the "initial commit" runs will fail. As a result, we're just re-running them with the commands above to ensure the environments are correctly deployed. -# Check composition status -kubectl describe supergraphschemas reference-architecture-dev -n apollo -``` +
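If you want to confirm that the re-run workflows actually completed before moving on, the GitHub CLI can list and follow recent runs. This is an optional sketch, not part of the reference repository; it assumes the same `$GITHUB_ORG` variable and workflow names used above, and the `--limit` value is arbitrary.

```sh
# Check the most recent runs of the workflows triggered above
gh run list --repo $GITHUB_ORG/reference-architecture \
  --workflow "Merge to Main" --limit 3

gh run list --repo $GITHUB_ORG/reference-architecture \
  --workflow "Deploy Open Telemetry Collector" --limit 3

# Optionally follow a specific run until it finishes (pass its run ID from the list above)
# gh run watch <run-id> --repo $GITHUB_ORG/reference-architecture
```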
You can try out a subgraph using port forwarding: ```sh +kubectx apollo-supergraph-k8s-dev kubectl port-forward service/graphql -n checkout 4001:4001 ``` @@ -313,112 +296,55 @@ Then visit [http://localhost:4001/](http://localhost:4001/). ### Deploy subgraphs to prod -Deploy the subgraphs to production using the same process: - -```sh -kubectx apollo-supergraph-k8s-prod - -# Deploy each subgraph service -for subgraph in checkout discovery inventory orders products reviews shipping users; do - kubectl create namespace $subgraph --dry-run=client -o yaml | kubectl apply -f - - helm install $subgraph subgraphs/$subgraph/deploy -f subgraphs/$subgraph/deploy/environments/prod.yaml -n $subgraph - kubectl apply -f subgraphs/$subgraph/k8s/subgraph-prod.yaml -done -``` - -Monitor the deployment: +Commits to the `main` branch of the subgraph repos are automatically built and deployed to the `dev` cluster. To deploy to prod, run the deploy actions: ```sh -# Check if subgraphs are registered -kubectl get subgraphs --all-namespaces - -# Check composition status -kubectl describe supergraphschemas reference-architecture-prod -n apollo + gh workflow run "Manual Deploy - Subgraphs" --repo $GITHUB_ORG/reference-architecture \ + -f version=main \ + -f environment=prod \ + -f dry-run=false \ + -f debug=false ``` -You've successfully deployed your subgraphs! The next step is to deploy the Apollo Router and Coprocessor. - - -### Deploy the coprocessor and router - -Deploy the coprocessor first: - ```sh -# Deploy to dev (with envsubst for variable substitution) -kubectx apollo-supergraph-k8s-dev -if command -v envsubst &> /dev/null; then - envsubst < deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo -else - # Fallback if envsubst not available - sed "s|\${GITHUB_ORG}|${GITHUB_ORG:-apollosolutions}|g" deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo -fi - -# Deploy to prod kubectx apollo-supergraph-k8s-prod -if command -v envsubst &> /dev/null; then - envsubst < deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo -else - # Fallback if envsubst not available - sed "s|\${GITHUB_ORG}|${GITHUB_ORG:-apollosolutions}|g" deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo -fi -``` - -Once the coprocessor is deployed, deploy the router using the operator Supergraph CRDs: - -**Note:** Make sure you've sourced the `.env` file from your terraform directory first to set `TF_VAR_apollo_graph_id`: - -```sh -cd terraform/gcp # or terraform/aws, terraform/minikube -source .env -cd ../.. +kubectl port-forward service/graphql -n checkout 4001:4001 ``` -Then deploy the operator resources: +Then visit [http://localhost:4001/](http://localhost:4001/). You've successfully deployed your subgraphs! The next step is to deploy the Apollo Router and Coprocessor. -```sh -# Deploy to dev -kubectx apollo-supergraph-k8s-dev -cd deploy/operator-resources -./apply-resources.sh dev -cd ../.. -# Deploy to prod -kubectx apollo-supergraph-k8s-prod -cd deploy/operator-resources -./apply-resources.sh prod -cd ../.. 
-``` +### Deploy the coprocessor and router -Or manually apply with kubectl: +To do so, we'll need to run: ```sh -# Deploy to dev (with envsubst for variable substitution) -kubectx apollo-supergraph-k8s-dev -envsubst < deploy/operator-resources/supergraphschema-dev.yaml | kubectl apply -f - -kubectl apply -f deploy/operator-resources/supergraph-dev.yaml -kubectl apply -f deploy/operator-resources/ingress-dev.yaml +gh workflow run "Deploy Coprocessor" --repo $GITHUB_ORG/reference-architecture \ + -f environment=dev \ + -f dry-run=false \ + -f debug=false -# Deploy to prod -kubectx apollo-supergraph-k8s-prod -envsubst < deploy/operator-resources/supergraphschema-prod.yaml | kubectl apply -f - -kubectl apply -f deploy/operator-resources/supergraph-prod.yaml -kubectl apply -f deploy/operator-resources/ingress-prod.yaml +gh workflow run "Deploy Coprocessor" --repo $GITHUB_ORG/reference-architecture \ + -f environment=prod \ + -f dry-run=false \ + -f debug=false ``` -The operator will automatically deploy the router based on the composed supergraph schema. You can monitor the deployment: +First, and once the deploy completes, we'll deploy the router: ```sh -# Check router deployment status -kubectl get supergraphs -n apollo - -# Check router pods -kubectl get pods -n apollo +gh workflow run "Deploy Router" --repo $GITHUB_ORG/reference-architecture \ + -f environment=dev \ + -f dry-run=false \ + -f debug=false -# Describe the supergraph to see conditions -kubectl describe supergraphs reference-architecture-prod -n apollo +gh workflow run "Deploy Router" --repo $GITHUB_ORG/reference-architecture \ + -f environment=prod \ + -f dry-run=false \ + -f debug=false ``` -Once deployed, an ingress will be created to access the router. In the case of AWS, it will be a domain name, and in the case of GCP, it'll be an IP. +Which will deploy the router and coprocessor into both environments (`dev` and `prod`), as well as an ingress to access the router on both. In the case of AWS, it will be a domain name, and in the case of GCP, it'll be an IP. Follow the below instructions for your cloud provider you are using. Please note that for both providers, the value for the ingress may take some time to become live, so you may need to give it a few minutes to process. @@ -426,7 +352,7 @@ Follow the below instructions for your cloud provider you are using. Please note ```sh kubectx apollo-supergraph-k8s-prod -ROUTER_HOSTNAME=http://$(kubectl get ingress -n apollo -o jsonpath="{.*.*.status.loadBalancer.ingress.*.ip}") +ROUTER_HOSTNAME=http://$(kubectl get ingress -n router -o jsonpath="{.*.*.status.loadBalancer.ingress.*.ip}") open $ROUTER_HOSTNAME ``` @@ -434,7 +360,7 @@ open $ROUTER_HOSTNAME ```sh kubectx apollo-supergraph-k8s-prod -ROUTER_HOSTNAME=$(kubectl get ingress -n apollo -o jsonpath="{.*.*.status.loadBalancer.ingress.*.hostname}") +ROUTER_HOSTNAME=$(kubectl get ingress -n router -o jsonpath="{.*.*.status.loadBalancer.ingress.*.hostname}") open http://$ROUTER_HOSTNAME ``` @@ -454,7 +380,7 @@ The last step to getting fully configured is to deploy the client to both enviro ```sh kubectx apollo-supergraph-k8s-prod -ROUTER_HOSTNAME=http://$(kubectl get ingress -n apollo -o jsonpath="{.*.*.status.loadBalancer.ingress.*.ip}") +ROUTER_HOSTNAME=http://$(kubectl get ingress -n router -o jsonpath="{.*.*.status.loadBalancer.ingress.*.ip}") ``` Upon running the above commands, you'll have the Router page open and you can make requests against your newly deployed supergraph! 
@@ -463,7 +389,7 @@ Upon running the above commands, you'll have the Router page open and you can ma ```sh kubectx apollo-supergraph-k8s-prod -ROUTER_HOSTNAME=$(kubectl get ingress -n apollo -o jsonpath="{.*.*.status.loadBalancer.ingress.*.hostname}") +ROUTER_HOSTNAME=$(kubectl get ingress -n router -o jsonpath="{.*.*.status.loadBalancer.ingress.*.hostname}") ``` Once you have the router hostname, you'll need to set it as a secret in the GitHub repository created. diff --git a/subgraphs/checkout/Dockerfile b/subgraphs/checkout/Dockerfile index 8e5f473..1f322e0 100644 --- a/subgraphs/checkout/Dockerfile +++ b/subgraphs/checkout/Dockerfile @@ -10,13 +10,10 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build -# Copy schema to /app directory for operator to access -RUN mkdir -p /app -COPY schema.graphql /app/schema.graphql - EXPOSE 4001 CMD [ "node", "--require","./dist/tracing.js","./dist/index.js" ] diff --git a/subgraphs/checkout/deploy/values.yaml b/subgraphs/checkout/deploy/values.yaml deleted file mode 100644 index cd0958c..0000000 --- a/subgraphs/checkout/deploy/values.yaml +++ /dev/null @@ -1,43 +0,0 @@ -replicaCount: 3 - -image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout - pullPolicy: Always - tag: main - -nameOverride: graphql -fullnameOverride: graphql - -serviceAccount: - create: false - -podAnnotations: {} - -securityContext: {} - -podSecurityContext: {} - -service: - type: ClusterIP - port: 4001 - -ingress: - enabled: false - className: "" - annotations: - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: checkout.local - paths: - - path: / - pathType: Prefix - tls: [] - -resources: {} - -autoscaling: - enabled: false - targetCPUUtilizationPercentage: 80 - minReplicas: 1 - maxReplicas: 5 diff --git a/subgraphs/checkout/k8s/subgraph-dev.yaml b/subgraphs/checkout/k8s/subgraph-dev.yaml deleted file mode 100644 index a1404ec..0000000 --- a/subgraphs/checkout/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: checkout - namespace: checkout - labels: - app: checkout - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.checkout.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/checkout:main - path: /app/schema.graphql - diff --git a/subgraphs/checkout/k8s/subgraph-prod.yaml b/subgraphs/checkout/k8s/subgraph-prod.yaml deleted file mode 100644 index 65a70cb..0000000 --- a/subgraphs/checkout/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: checkout - namespace: checkout - labels: - app: checkout - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.checkout.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/andywgarcia/reference-architecture/checkout:main - path: /app/schema.graphql - diff --git a/subgraphs/discovery/Dockerfile b/subgraphs/discovery/Dockerfile index 8e5f473..1f322e0 100644 --- a/subgraphs/discovery/Dockerfile +++ b/subgraphs/discovery/Dockerfile @@ -10,13 +10,10 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . 
RUN npm run build -# Copy schema to /app directory for operator to access -RUN mkdir -p /app -COPY schema.graphql /app/schema.graphql - EXPOSE 4001 CMD [ "node", "--require","./dist/tracing.js","./dist/index.js" ] diff --git a/subgraphs/discovery/deploy/Chart.yaml b/subgraphs/discovery/deploy/Chart.yaml deleted file mode 100644 index 189e683..0000000 --- a/subgraphs/discovery/deploy/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: subgraph -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: main diff --git a/subgraphs/discovery/deploy/environments/dev.yaml b/subgraphs/discovery/deploy/environments/dev.yaml deleted file mode 100644 index 689f1c9..0000000 --- a/subgraphs/discovery/deploy/environments/dev.yaml +++ /dev/null @@ -1,3 +0,0 @@ -requests: - cpu: 100m - memory: 256Mi diff --git a/subgraphs/discovery/deploy/environments/prod.yaml b/subgraphs/discovery/deploy/environments/prod.yaml deleted file mode 100644 index c478468..0000000 --- a/subgraphs/discovery/deploy/environments/prod.yaml +++ /dev/null @@ -1,6 +0,0 @@ -env: - - name: OTEL_HTTP_ENDPOINT - value: http://collector.monitoring:4318/v1/traces -requests: - cpu: 500m - memory: 512Mi diff --git a/subgraphs/discovery/deploy/templates/NOTES.txt b/subgraphs/discovery/deploy/templates/NOTES.txt deleted file mode 100644 index 026fa7b..0000000 --- a/subgraphs/discovery/deploy/templates/NOTES.txt +++ /dev/null @@ -1,22 +0,0 @@ -1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "subgraph.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "subgraph.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "subgraph.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "subgraph.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT -{{- end }} diff --git a/subgraphs/discovery/deploy/templates/_helpers.tpl b/subgraphs/discovery/deploy/templates/_helpers.tpl deleted file mode 100644 index f789130..0000000 --- a/subgraphs/discovery/deploy/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "subgraph.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "subgraph.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "subgraph.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "subgraph.labels" -}} -helm.sh/chart: {{ include "subgraph.chart" . }} -{{ include "subgraph.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "subgraph.selectorLabels" -}} -app.kubernetes.io/name: {{ include "subgraph.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "subgraph.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "subgraph.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/subgraphs/discovery/deploy/templates/deployment.yaml b/subgraphs/discovery/deploy/templates/deployment.yaml deleted file mode 100644 index 7ff589d..0000000 --- a/subgraphs/discovery/deploy/templates/deployment.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "subgraph.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . 
| nindent 8 }} - {{- end }} - labels: - {{- include "subgraph.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "subgraph.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: 4001 - protocol: TCP - {{- with .Values.env }} - env: - {{ toYaml . | nindent 12 }} - {{- end }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/subgraphs/discovery/deploy/templates/hpa.yaml b/subgraphs/discovery/deploy/templates/hpa.yaml deleted file mode 100644 index 1b87527..0000000 --- a/subgraphs/discovery/deploy/templates/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "subgraph-a.fullname" . }} - labels: - {{- include "subgraph-a.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "subgraph-a.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/subgraphs/discovery/deploy/templates/ingress.yaml b/subgraphs/discovery/deploy/templates/ingress.yaml deleted file mode 100644 index d92a8ae..0000000 --- a/subgraphs/discovery/deploy/templates/ingress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "subgraph.fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -spec: - {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.ingress.className }} - {{- end }} - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/subgraphs/discovery/deploy/templates/service.yaml b/subgraphs/discovery/deploy/templates/service.yaml deleted file mode 100644 index 20e15b8..0000000 --- a/subgraphs/discovery/deploy/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "subgraph.selectorLabels" . | nindent 4 }} diff --git a/subgraphs/discovery/deploy/templates/serviceaccount.yaml b/subgraphs/discovery/deploy/templates/serviceaccount.yaml deleted file mode 100644 index 61e85b4..0000000 --- a/subgraphs/discovery/deploy/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "subgraph.serviceAccountName" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/subgraphs/discovery/deploy/templates/tests/test-connection.yaml b/subgraphs/discovery/deploy/templates/tests/test-connection.yaml deleted file mode 100644 index c86ef2c..0000000 --- a/subgraphs/discovery/deploy/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "subgraph.fullname" . }}-test-connection" - labels: - {{- include "subgraph.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "subgraph.fullname" . 
}}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/subgraphs/discovery/k8s/subgraph-dev.yaml b/subgraphs/discovery/k8s/subgraph-dev.yaml deleted file mode 100644 index 4890039..0000000 --- a/subgraphs/discovery/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: discovery - namespace: discovery - labels: - app: discovery - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.discovery.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/discovery:main - path: /app/schema.graphql diff --git a/subgraphs/discovery/k8s/subgraph-prod.yaml b/subgraphs/discovery/k8s/subgraph-prod.yaml deleted file mode 100644 index 4890039..0000000 --- a/subgraphs/discovery/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: discovery - namespace: discovery - labels: - app: discovery - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.discovery.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/discovery:main - path: /app/schema.graphql diff --git a/subgraphs/inventory/Dockerfile b/subgraphs/inventory/Dockerfile index 8e5f473..1f322e0 100644 --- a/subgraphs/inventory/Dockerfile +++ b/subgraphs/inventory/Dockerfile @@ -10,13 +10,10 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build -# Copy schema to /app directory for operator to access -RUN mkdir -p /app -COPY schema.graphql /app/schema.graphql - EXPOSE 4001 CMD [ "node", "--require","./dist/tracing.js","./dist/index.js" ] diff --git a/subgraphs/inventory/deploy/.helmignore b/subgraphs/inventory/deploy/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/subgraphs/inventory/deploy/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/subgraphs/inventory/deploy/environments/dev.yaml b/subgraphs/inventory/deploy/environments/dev.yaml deleted file mode 100644 index 689f1c9..0000000 --- a/subgraphs/inventory/deploy/environments/dev.yaml +++ /dev/null @@ -1,3 +0,0 @@ -requests: - cpu: 100m - memory: 256Mi diff --git a/subgraphs/inventory/deploy/environments/prod.yaml b/subgraphs/inventory/deploy/environments/prod.yaml deleted file mode 100644 index c478468..0000000 --- a/subgraphs/inventory/deploy/environments/prod.yaml +++ /dev/null @@ -1,6 +0,0 @@ -env: - - name: OTEL_HTTP_ENDPOINT - value: http://collector.monitoring:4318/v1/traces -requests: - cpu: 500m - memory: 512Mi diff --git a/subgraphs/inventory/deploy/templates/NOTES.txt b/subgraphs/inventory/deploy/templates/NOTES.txt deleted file mode 100644 index 026fa7b..0000000 --- a/subgraphs/inventory/deploy/templates/NOTES.txt +++ /dev/null @@ -1,22 +0,0 @@ -1. 
Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "subgraph.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "subgraph.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "subgraph.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "subgraph.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT -{{- end }} diff --git a/subgraphs/inventory/deploy/templates/_helpers.tpl b/subgraphs/inventory/deploy/templates/_helpers.tpl deleted file mode 100644 index f789130..0000000 --- a/subgraphs/inventory/deploy/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "subgraph.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "subgraph.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "subgraph.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "subgraph.labels" -}} -helm.sh/chart: {{ include "subgraph.chart" . }} -{{ include "subgraph.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "subgraph.selectorLabels" -}} -app.kubernetes.io/name: {{ include "subgraph.name" . 
}} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "subgraph.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "subgraph.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/subgraphs/inventory/deploy/templates/deployment.yaml b/subgraphs/inventory/deploy/templates/deployment.yaml deleted file mode 100644 index 7ff589d..0000000 --- a/subgraphs/inventory/deploy/templates/deployment.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "subgraph.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "subgraph.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "subgraph.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: 4001 - protocol: TCP - {{- with .Values.env }} - env: - {{ toYaml . | nindent 12 }} - {{- end }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/subgraphs/inventory/deploy/templates/hpa.yaml b/subgraphs/inventory/deploy/templates/hpa.yaml deleted file mode 100644 index 1b87527..0000000 --- a/subgraphs/inventory/deploy/templates/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "subgraph-a.fullname" . }} - labels: - {{- include "subgraph-a.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "subgraph-a.fullname" . 
}} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/subgraphs/inventory/deploy/templates/ingress.yaml b/subgraphs/inventory/deploy/templates/ingress.yaml deleted file mode 100644 index d92a8ae..0000000 --- a/subgraphs/inventory/deploy/templates/ingress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "subgraph.fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.ingress.className }} - {{- end }} - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/subgraphs/inventory/deploy/templates/service.yaml b/subgraphs/inventory/deploy/templates/service.yaml deleted file mode 100644 index 20e15b8..0000000 --- a/subgraphs/inventory/deploy/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "subgraph.selectorLabels" . 
| nindent 4 }} diff --git a/subgraphs/inventory/deploy/templates/serviceaccount.yaml b/subgraphs/inventory/deploy/templates/serviceaccount.yaml deleted file mode 100644 index 61e85b4..0000000 --- a/subgraphs/inventory/deploy/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "subgraph.serviceAccountName" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/subgraphs/inventory/deploy/templates/tests/test-connection.yaml b/subgraphs/inventory/deploy/templates/tests/test-connection.yaml deleted file mode 100644 index c86ef2c..0000000 --- a/subgraphs/inventory/deploy/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "subgraph.fullname" . }}-test-connection" - labels: - {{- include "subgraph.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "subgraph.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/subgraphs/inventory/deploy/values.yaml b/subgraphs/inventory/deploy/values.yaml deleted file mode 100644 index cd0958c..0000000 --- a/subgraphs/inventory/deploy/values.yaml +++ /dev/null @@ -1,43 +0,0 @@ -replicaCount: 3 - -image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout - pullPolicy: Always - tag: main - -nameOverride: graphql -fullnameOverride: graphql - -serviceAccount: - create: false - -podAnnotations: {} - -securityContext: {} - -podSecurityContext: {} - -service: - type: ClusterIP - port: 4001 - -ingress: - enabled: false - className: "" - annotations: - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: checkout.local - paths: - - path: / - pathType: Prefix - tls: [] - -resources: {} - -autoscaling: - enabled: false - targetCPUUtilizationPercentage: 80 - minReplicas: 1 - maxReplicas: 5 diff --git a/subgraphs/inventory/k8s/subgraph-dev.yaml b/subgraphs/inventory/k8s/subgraph-dev.yaml deleted file mode 100644 index a7fcb24..0000000 --- a/subgraphs/inventory/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: inventory - namespace: inventory - labels: - app: inventory - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.inventory.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/inventory:main - path: /app/schema.graphql diff --git a/subgraphs/inventory/k8s/subgraph-prod.yaml b/subgraphs/inventory/k8s/subgraph-prod.yaml deleted file mode 100644 index a7fcb24..0000000 --- a/subgraphs/inventory/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: inventory - namespace: inventory - labels: - app: inventory - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.inventory.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/inventory:main - path: /app/schema.graphql diff --git a/subgraphs/orders/Dockerfile b/subgraphs/orders/Dockerfile index 8e5f473..1f322e0 100644 --- a/subgraphs/orders/Dockerfile +++ b/subgraphs/orders/Dockerfile @@ -10,13 +10,10 @@ RUN npm install --no-save COPY src ./src COPY 
tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build -# Copy schema to /app directory for operator to access -RUN mkdir -p /app -COPY schema.graphql /app/schema.graphql - EXPOSE 4001 CMD [ "node", "--require","./dist/tracing.js","./dist/index.js" ] diff --git a/subgraphs/orders/deploy/.helmignore b/subgraphs/orders/deploy/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/subgraphs/orders/deploy/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/subgraphs/orders/deploy/Chart.yaml b/subgraphs/orders/deploy/Chart.yaml deleted file mode 100644 index 189e683..0000000 --- a/subgraphs/orders/deploy/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: subgraph -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: main diff --git a/subgraphs/orders/deploy/environments/dev.yaml b/subgraphs/orders/deploy/environments/dev.yaml deleted file mode 100644 index 689f1c9..0000000 --- a/subgraphs/orders/deploy/environments/dev.yaml +++ /dev/null @@ -1,3 +0,0 @@ -requests: - cpu: 100m - memory: 256Mi diff --git a/subgraphs/orders/deploy/environments/prod.yaml b/subgraphs/orders/deploy/environments/prod.yaml deleted file mode 100644 index c478468..0000000 --- a/subgraphs/orders/deploy/environments/prod.yaml +++ /dev/null @@ -1,6 +0,0 @@ -env: - - name: OTEL_HTTP_ENDPOINT - value: http://collector.monitoring:4318/v1/traces -requests: - cpu: 500m - memory: 512Mi diff --git a/subgraphs/orders/deploy/templates/NOTES.txt b/subgraphs/orders/deploy/templates/NOTES.txt deleted file mode 100644 index 026fa7b..0000000 --- a/subgraphs/orders/deploy/templates/NOTES.txt +++ /dev/null @@ -1,22 +0,0 @@ -1. 
Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "subgraph.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "subgraph.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "subgraph.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "subgraph.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT -{{- end }} diff --git a/subgraphs/orders/deploy/templates/_helpers.tpl b/subgraphs/orders/deploy/templates/_helpers.tpl deleted file mode 100644 index f789130..0000000 --- a/subgraphs/orders/deploy/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "subgraph.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "subgraph.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "subgraph.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "subgraph.labels" -}} -helm.sh/chart: {{ include "subgraph.chart" . }} -{{ include "subgraph.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "subgraph.selectorLabels" -}} -app.kubernetes.io/name: {{ include "subgraph.name" . 
}} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "subgraph.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "subgraph.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/subgraphs/orders/deploy/templates/deployment.yaml b/subgraphs/orders/deploy/templates/deployment.yaml deleted file mode 100644 index 7ff589d..0000000 --- a/subgraphs/orders/deploy/templates/deployment.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "subgraph.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "subgraph.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "subgraph.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: 4001 - protocol: TCP - {{- with .Values.env }} - env: - {{ toYaml . | nindent 12 }} - {{- end }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/subgraphs/orders/deploy/templates/hpa.yaml b/subgraphs/orders/deploy/templates/hpa.yaml deleted file mode 100644 index 1b87527..0000000 --- a/subgraphs/orders/deploy/templates/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "subgraph-a.fullname" . }} - labels: - {{- include "subgraph-a.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "subgraph-a.fullname" . 
}} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/subgraphs/orders/deploy/templates/ingress.yaml b/subgraphs/orders/deploy/templates/ingress.yaml deleted file mode 100644 index d92a8ae..0000000 --- a/subgraphs/orders/deploy/templates/ingress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "subgraph.fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.ingress.className }} - {{- end }} - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/subgraphs/orders/deploy/templates/service.yaml b/subgraphs/orders/deploy/templates/service.yaml deleted file mode 100644 index 20e15b8..0000000 --- a/subgraphs/orders/deploy/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "subgraph.selectorLabels" . 
| nindent 4 }} diff --git a/subgraphs/orders/deploy/templates/serviceaccount.yaml b/subgraphs/orders/deploy/templates/serviceaccount.yaml deleted file mode 100644 index 61e85b4..0000000 --- a/subgraphs/orders/deploy/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "subgraph.serviceAccountName" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/subgraphs/orders/deploy/templates/tests/test-connection.yaml b/subgraphs/orders/deploy/templates/tests/test-connection.yaml deleted file mode 100644 index c86ef2c..0000000 --- a/subgraphs/orders/deploy/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "subgraph.fullname" . }}-test-connection" - labels: - {{- include "subgraph.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "subgraph.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/subgraphs/orders/deploy/values.yaml b/subgraphs/orders/deploy/values.yaml deleted file mode 100644 index cd0958c..0000000 --- a/subgraphs/orders/deploy/values.yaml +++ /dev/null @@ -1,43 +0,0 @@ -replicaCount: 3 - -image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout - pullPolicy: Always - tag: main - -nameOverride: graphql -fullnameOverride: graphql - -serviceAccount: - create: false - -podAnnotations: {} - -securityContext: {} - -podSecurityContext: {} - -service: - type: ClusterIP - port: 4001 - -ingress: - enabled: false - className: "" - annotations: - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: checkout.local - paths: - - path: / - pathType: Prefix - tls: [] - -resources: {} - -autoscaling: - enabled: false - targetCPUUtilizationPercentage: 80 - minReplicas: 1 - maxReplicas: 5 diff --git a/subgraphs/orders/k8s/subgraph-dev.yaml b/subgraphs/orders/k8s/subgraph-dev.yaml deleted file mode 100644 index d4acc82..0000000 --- a/subgraphs/orders/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: orders - namespace: orders - labels: - app: orders - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.orders.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/orders:main - path: /app/schema.graphql diff --git a/subgraphs/orders/k8s/subgraph-prod.yaml b/subgraphs/orders/k8s/subgraph-prod.yaml deleted file mode 100644 index d4acc82..0000000 --- a/subgraphs/orders/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: orders - namespace: orders - labels: - app: orders - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.orders.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/orders:main - path: /app/schema.graphql diff --git a/subgraphs/products/Dockerfile b/subgraphs/products/Dockerfile index 8e5f473..1f322e0 100644 --- a/subgraphs/products/Dockerfile +++ b/subgraphs/products/Dockerfile @@ -10,13 +10,10 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . 
RUN npm run build -# Copy schema to /app directory for operator to access -RUN mkdir -p /app -COPY schema.graphql /app/schema.graphql - EXPOSE 4001 CMD [ "node", "--require","./dist/tracing.js","./dist/index.js" ] diff --git a/subgraphs/products/deploy/.helmignore b/subgraphs/products/deploy/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/subgraphs/products/deploy/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/subgraphs/products/deploy/Chart.yaml b/subgraphs/products/deploy/Chart.yaml deleted file mode 100644 index 189e683..0000000 --- a/subgraphs/products/deploy/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: subgraph -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: main diff --git a/subgraphs/products/deploy/environments/dev.yaml b/subgraphs/products/deploy/environments/dev.yaml deleted file mode 100644 index 689f1c9..0000000 --- a/subgraphs/products/deploy/environments/dev.yaml +++ /dev/null @@ -1,3 +0,0 @@ -requests: - cpu: 100m - memory: 256Mi diff --git a/subgraphs/products/deploy/environments/prod.yaml b/subgraphs/products/deploy/environments/prod.yaml deleted file mode 100644 index c478468..0000000 --- a/subgraphs/products/deploy/environments/prod.yaml +++ /dev/null @@ -1,6 +0,0 @@ -env: - - name: OTEL_HTTP_ENDPOINT - value: http://collector.monitoring:4318/v1/traces -requests: - cpu: 500m - memory: 512Mi diff --git a/subgraphs/products/deploy/templates/NOTES.txt b/subgraphs/products/deploy/templates/NOTES.txt deleted file mode 100644 index 026fa7b..0000000 --- a/subgraphs/products/deploy/templates/NOTES.txt +++ /dev/null @@ -1,22 +0,0 @@ -1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "subgraph.fullname" . 
}}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "subgraph.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "subgraph.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "subgraph.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT -{{- end }} diff --git a/subgraphs/products/deploy/templates/_helpers.tpl b/subgraphs/products/deploy/templates/_helpers.tpl deleted file mode 100644 index f789130..0000000 --- a/subgraphs/products/deploy/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "subgraph.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "subgraph.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "subgraph.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "subgraph.labels" -}} -helm.sh/chart: {{ include "subgraph.chart" . }} -{{ include "subgraph.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "subgraph.selectorLabels" -}} -app.kubernetes.io/name: {{ include "subgraph.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "subgraph.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "subgraph.fullname" .) 
.Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/subgraphs/products/deploy/templates/deployment.yaml b/subgraphs/products/deploy/templates/deployment.yaml deleted file mode 100644 index 7ff589d..0000000 --- a/subgraphs/products/deploy/templates/deployment.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "subgraph.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "subgraph.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "subgraph.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: 4001 - protocol: TCP - {{- with .Values.env }} - env: - {{ toYaml . | nindent 12 }} - {{- end }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/subgraphs/products/deploy/templates/hpa.yaml b/subgraphs/products/deploy/templates/hpa.yaml deleted file mode 100644 index 1b87527..0000000 --- a/subgraphs/products/deploy/templates/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "subgraph-a.fullname" . }} - labels: - {{- include "subgraph-a.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "subgraph-a.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/subgraphs/products/deploy/templates/ingress.yaml b/subgraphs/products/deploy/templates/ingress.yaml deleted file mode 100644 index d92a8ae..0000000 --- a/subgraphs/products/deploy/templates/ingress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "subgraph.fullname" . 
-}} -{{- $svcPort := .Values.service.port -}} -{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.ingress.className }} - {{- end }} - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/subgraphs/products/deploy/templates/service.yaml b/subgraphs/products/deploy/templates/service.yaml deleted file mode 100644 index 20e15b8..0000000 --- a/subgraphs/products/deploy/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "subgraph.selectorLabels" . | nindent 4 }} diff --git a/subgraphs/products/deploy/templates/serviceaccount.yaml b/subgraphs/products/deploy/templates/serviceaccount.yaml deleted file mode 100644 index 61e85b4..0000000 --- a/subgraphs/products/deploy/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "subgraph.serviceAccountName" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/subgraphs/products/deploy/templates/tests/test-connection.yaml b/subgraphs/products/deploy/templates/tests/test-connection.yaml deleted file mode 100644 index c86ef2c..0000000 --- a/subgraphs/products/deploy/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "subgraph.fullname" . }}-test-connection" - labels: - {{- include "subgraph.labels" . 
| nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "subgraph.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/subgraphs/products/deploy/values.yaml b/subgraphs/products/deploy/values.yaml deleted file mode 100644 index cd0958c..0000000 --- a/subgraphs/products/deploy/values.yaml +++ /dev/null @@ -1,43 +0,0 @@ -replicaCount: 3 - -image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout - pullPolicy: Always - tag: main - -nameOverride: graphql -fullnameOverride: graphql - -serviceAccount: - create: false - -podAnnotations: {} - -securityContext: {} - -podSecurityContext: {} - -service: - type: ClusterIP - port: 4001 - -ingress: - enabled: false - className: "" - annotations: - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: checkout.local - paths: - - path: / - pathType: Prefix - tls: [] - -resources: {} - -autoscaling: - enabled: false - targetCPUUtilizationPercentage: 80 - minReplicas: 1 - maxReplicas: 5 diff --git a/subgraphs/products/k8s/subgraph-dev.yaml b/subgraphs/products/k8s/subgraph-dev.yaml deleted file mode 100644 index e0fbdc5..0000000 --- a/subgraphs/products/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: products - namespace: products - labels: - app: products - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.products.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/products:main - path: /app/schema.graphql diff --git a/subgraphs/products/k8s/subgraph-prod.yaml b/subgraphs/products/k8s/subgraph-prod.yaml deleted file mode 100644 index e0fbdc5..0000000 --- a/subgraphs/products/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: products - namespace: products - labels: - app: products - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.products.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/products:main - path: /app/schema.graphql diff --git a/subgraphs/reviews/Dockerfile b/subgraphs/reviews/Dockerfile index 8e5f473..1f322e0 100644 --- a/subgraphs/reviews/Dockerfile +++ b/subgraphs/reviews/Dockerfile @@ -10,13 +10,10 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build -# Copy schema to /app directory for operator to access -RUN mkdir -p /app -COPY schema.graphql /app/schema.graphql - EXPOSE 4001 CMD [ "node", "--require","./dist/tracing.js","./dist/index.js" ] diff --git a/subgraphs/reviews/deploy/.helmignore b/subgraphs/reviews/deploy/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/subgraphs/reviews/deploy/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/subgraphs/reviews/deploy/Chart.yaml b/subgraphs/reviews/deploy/Chart.yaml deleted file mode 100644 index 189e683..0000000 --- a/subgraphs/reviews/deploy/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: subgraph -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: main diff --git a/subgraphs/reviews/deploy/environments/dev.yaml b/subgraphs/reviews/deploy/environments/dev.yaml deleted file mode 100644 index 689f1c9..0000000 --- a/subgraphs/reviews/deploy/environments/dev.yaml +++ /dev/null @@ -1,3 +0,0 @@ -requests: - cpu: 100m - memory: 256Mi diff --git a/subgraphs/reviews/deploy/environments/prod.yaml b/subgraphs/reviews/deploy/environments/prod.yaml deleted file mode 100644 index c478468..0000000 --- a/subgraphs/reviews/deploy/environments/prod.yaml +++ /dev/null @@ -1,6 +0,0 @@ -env: - - name: OTEL_HTTP_ENDPOINT - value: http://collector.monitoring:4318/v1/traces -requests: - cpu: 500m - memory: 512Mi diff --git a/subgraphs/reviews/deploy/templates/NOTES.txt b/subgraphs/reviews/deploy/templates/NOTES.txt deleted file mode 100644 index 026fa7b..0000000 --- a/subgraphs/reviews/deploy/templates/NOTES.txt +++ /dev/null @@ -1,22 +0,0 @@ -1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "subgraph.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "subgraph.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "subgraph.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "subgraph.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT -{{- end }} diff --git a/subgraphs/reviews/deploy/templates/_helpers.tpl b/subgraphs/reviews/deploy/templates/_helpers.tpl deleted file mode 100644 index f789130..0000000 --- a/subgraphs/reviews/deploy/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "subgraph.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "subgraph.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "subgraph.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "subgraph.labels" -}} -helm.sh/chart: {{ include "subgraph.chart" . }} -{{ include "subgraph.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "subgraph.selectorLabels" -}} -app.kubernetes.io/name: {{ include "subgraph.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "subgraph.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "subgraph.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/subgraphs/reviews/deploy/templates/deployment.yaml b/subgraphs/reviews/deploy/templates/deployment.yaml deleted file mode 100644 index 7ff589d..0000000 --- a/subgraphs/reviews/deploy/templates/deployment.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "subgraph.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . 
| nindent 8 }} - {{- end }} - labels: - {{- include "subgraph.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "subgraph.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: 4001 - protocol: TCP - {{- with .Values.env }} - env: - {{ toYaml . | nindent 12 }} - {{- end }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/subgraphs/reviews/deploy/templates/hpa.yaml b/subgraphs/reviews/deploy/templates/hpa.yaml deleted file mode 100644 index 1b87527..0000000 --- a/subgraphs/reviews/deploy/templates/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "subgraph-a.fullname" . }} - labels: - {{- include "subgraph-a.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "subgraph-a.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/subgraphs/reviews/deploy/templates/ingress.yaml b/subgraphs/reviews/deploy/templates/ingress.yaml deleted file mode 100644 index d92a8ae..0000000 --- a/subgraphs/reviews/deploy/templates/ingress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "subgraph.fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -spec: - {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.ingress.className }} - {{- end }} - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/subgraphs/reviews/deploy/templates/service.yaml b/subgraphs/reviews/deploy/templates/service.yaml deleted file mode 100644 index 20e15b8..0000000 --- a/subgraphs/reviews/deploy/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "subgraph.selectorLabels" . | nindent 4 }} diff --git a/subgraphs/reviews/deploy/templates/serviceaccount.yaml b/subgraphs/reviews/deploy/templates/serviceaccount.yaml deleted file mode 100644 index 61e85b4..0000000 --- a/subgraphs/reviews/deploy/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "subgraph.serviceAccountName" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/subgraphs/reviews/deploy/templates/tests/test-connection.yaml b/subgraphs/reviews/deploy/templates/tests/test-connection.yaml deleted file mode 100644 index c86ef2c..0000000 --- a/subgraphs/reviews/deploy/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "subgraph.fullname" . }}-test-connection" - labels: - {{- include "subgraph.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "subgraph.fullname" . 
}}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/subgraphs/reviews/deploy/values.yaml b/subgraphs/reviews/deploy/values.yaml deleted file mode 100644 index cd0958c..0000000 --- a/subgraphs/reviews/deploy/values.yaml +++ /dev/null @@ -1,43 +0,0 @@ -replicaCount: 3 - -image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout - pullPolicy: Always - tag: main - -nameOverride: graphql -fullnameOverride: graphql - -serviceAccount: - create: false - -podAnnotations: {} - -securityContext: {} - -podSecurityContext: {} - -service: - type: ClusterIP - port: 4001 - -ingress: - enabled: false - className: "" - annotations: - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: checkout.local - paths: - - path: / - pathType: Prefix - tls: [] - -resources: {} - -autoscaling: - enabled: false - targetCPUUtilizationPercentage: 80 - minReplicas: 1 - maxReplicas: 5 diff --git a/subgraphs/reviews/k8s/subgraph-dev.yaml b/subgraphs/reviews/k8s/subgraph-dev.yaml deleted file mode 100644 index 55b87d9..0000000 --- a/subgraphs/reviews/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: reviews - namespace: reviews - labels: - app: reviews - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.reviews.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/reviews:main - path: /app/schema.graphql diff --git a/subgraphs/reviews/k8s/subgraph-prod.yaml b/subgraphs/reviews/k8s/subgraph-prod.yaml deleted file mode 100644 index 55b87d9..0000000 --- a/subgraphs/reviews/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: reviews - namespace: reviews - labels: - app: reviews - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.reviews.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/reviews:main - path: /app/schema.graphql diff --git a/subgraphs/shipping/Dockerfile b/subgraphs/shipping/Dockerfile index 8e5f473..1f322e0 100644 --- a/subgraphs/shipping/Dockerfile +++ b/subgraphs/shipping/Dockerfile @@ -10,13 +10,10 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build -# Copy schema to /app directory for operator to access -RUN mkdir -p /app -COPY schema.graphql /app/schema.graphql - EXPOSE 4001 CMD [ "node", "--require","./dist/tracing.js","./dist/index.js" ] diff --git a/subgraphs/shipping/deploy/.helmignore b/subgraphs/shipping/deploy/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/subgraphs/shipping/deploy/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/subgraphs/shipping/deploy/Chart.yaml b/subgraphs/shipping/deploy/Chart.yaml deleted file mode 100644 index 189e683..0000000 --- a/subgraphs/shipping/deploy/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: subgraph -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. 
-# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: main diff --git a/subgraphs/shipping/deploy/environments/dev.yaml b/subgraphs/shipping/deploy/environments/dev.yaml deleted file mode 100644 index 689f1c9..0000000 --- a/subgraphs/shipping/deploy/environments/dev.yaml +++ /dev/null @@ -1,3 +0,0 @@ -requests: - cpu: 100m - memory: 256Mi diff --git a/subgraphs/shipping/deploy/environments/prod.yaml b/subgraphs/shipping/deploy/environments/prod.yaml deleted file mode 100644 index c478468..0000000 --- a/subgraphs/shipping/deploy/environments/prod.yaml +++ /dev/null @@ -1,6 +0,0 @@ -env: - - name: OTEL_HTTP_ENDPOINT - value: http://collector.monitoring:4318/v1/traces -requests: - cpu: 500m - memory: 512Mi diff --git a/subgraphs/shipping/deploy/templates/NOTES.txt b/subgraphs/shipping/deploy/templates/NOTES.txt deleted file mode 100644 index 026fa7b..0000000 --- a/subgraphs/shipping/deploy/templates/NOTES.txt +++ /dev/null @@ -1,22 +0,0 @@ -1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "subgraph.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "subgraph.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "subgraph.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "subgraph.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT -{{- end }} diff --git a/subgraphs/shipping/deploy/templates/_helpers.tpl b/subgraphs/shipping/deploy/templates/_helpers.tpl deleted file mode 100644 index f789130..0000000 --- a/subgraphs/shipping/deploy/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "subgraph.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "subgraph.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "subgraph.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "subgraph.labels" -}} -helm.sh/chart: {{ include "subgraph.chart" . }} -{{ include "subgraph.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "subgraph.selectorLabels" -}} -app.kubernetes.io/name: {{ include "subgraph.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "subgraph.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "subgraph.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/subgraphs/shipping/deploy/templates/deployment.yaml b/subgraphs/shipping/deploy/templates/deployment.yaml deleted file mode 100644 index 7ff589d..0000000 --- a/subgraphs/shipping/deploy/templates/deployment.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "subgraph.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "subgraph.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "subgraph.serviceAccountName" . 
}} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: 4001 - protocol: TCP - {{- with .Values.env }} - env: - {{ toYaml . | nindent 12 }} - {{- end }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/subgraphs/shipping/deploy/templates/hpa.yaml b/subgraphs/shipping/deploy/templates/hpa.yaml deleted file mode 100644 index 1b87527..0000000 --- a/subgraphs/shipping/deploy/templates/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "subgraph-a.fullname" . }} - labels: - {{- include "subgraph-a.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "subgraph-a.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/subgraphs/shipping/deploy/templates/ingress.yaml b/subgraphs/shipping/deploy/templates/ingress.yaml deleted file mode 100644 index d92a8ae..0000000 --- a/subgraphs/shipping/deploy/templates/ingress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "subgraph.fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.ingress.className }} - {{- end }} - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . 
| quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/subgraphs/shipping/deploy/templates/service.yaml b/subgraphs/shipping/deploy/templates/service.yaml deleted file mode 100644 index 20e15b8..0000000 --- a/subgraphs/shipping/deploy/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "subgraph.selectorLabels" . | nindent 4 }} diff --git a/subgraphs/shipping/deploy/templates/serviceaccount.yaml b/subgraphs/shipping/deploy/templates/serviceaccount.yaml deleted file mode 100644 index 61e85b4..0000000 --- a/subgraphs/shipping/deploy/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "subgraph.serviceAccountName" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/subgraphs/shipping/deploy/templates/tests/test-connection.yaml b/subgraphs/shipping/deploy/templates/tests/test-connection.yaml deleted file mode 100644 index c86ef2c..0000000 --- a/subgraphs/shipping/deploy/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "subgraph.fullname" . }}-test-connection" - labels: - {{- include "subgraph.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "subgraph.fullname" . 
}}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/subgraphs/shipping/deploy/values.yaml b/subgraphs/shipping/deploy/values.yaml deleted file mode 100644 index cd0958c..0000000 --- a/subgraphs/shipping/deploy/values.yaml +++ /dev/null @@ -1,43 +0,0 @@ -replicaCount: 3 - -image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout - pullPolicy: Always - tag: main - -nameOverride: graphql -fullnameOverride: graphql - -serviceAccount: - create: false - -podAnnotations: {} - -securityContext: {} - -podSecurityContext: {} - -service: - type: ClusterIP - port: 4001 - -ingress: - enabled: false - className: "" - annotations: - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: checkout.local - paths: - - path: / - pathType: Prefix - tls: [] - -resources: {} - -autoscaling: - enabled: false - targetCPUUtilizationPercentage: 80 - minReplicas: 1 - maxReplicas: 5 diff --git a/subgraphs/shipping/k8s/subgraph-dev.yaml b/subgraphs/shipping/k8s/subgraph-dev.yaml deleted file mode 100644 index bc31d82..0000000 --- a/subgraphs/shipping/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: shipping - namespace: shipping - labels: - app: shipping - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.shipping.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/shipping:main - path: /app/schema.graphql diff --git a/subgraphs/shipping/k8s/subgraph-prod.yaml b/subgraphs/shipping/k8s/subgraph-prod.yaml deleted file mode 100644 index bc31d82..0000000 --- a/subgraphs/shipping/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: shipping - namespace: shipping - labels: - app: shipping - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.shipping.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/shipping:main - path: /app/schema.graphql diff --git a/subgraphs/users/Dockerfile b/subgraphs/users/Dockerfile index 8e5f473..5aae39f 100644 --- a/subgraphs/users/Dockerfile +++ b/subgraphs/users/Dockerfile @@ -10,13 +10,11 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . - +COPY schema.graphql . +COPY keys ./keys RUN npm run build -# Copy schema to /app directory for operator to access -RUN mkdir -p /app -COPY schema.graphql /app/schema.graphql - +# COPY ./src/keys /dist/keys EXPOSE 4001 CMD [ "node", "--require","./dist/tracing.js","./dist/index.js" ] diff --git a/subgraphs/users/deploy/.helmignore b/subgraphs/users/deploy/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/subgraphs/users/deploy/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/subgraphs/users/deploy/Chart.yaml b/subgraphs/users/deploy/Chart.yaml deleted file mode 100644 index 189e683..0000000 --- a/subgraphs/users/deploy/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: subgraph -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. 
-# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: main diff --git a/subgraphs/users/deploy/environments/dev.yaml b/subgraphs/users/deploy/environments/dev.yaml deleted file mode 100644 index 689f1c9..0000000 --- a/subgraphs/users/deploy/environments/dev.yaml +++ /dev/null @@ -1,3 +0,0 @@ -requests: - cpu: 100m - memory: 256Mi diff --git a/subgraphs/users/deploy/environments/prod.yaml b/subgraphs/users/deploy/environments/prod.yaml deleted file mode 100644 index c478468..0000000 --- a/subgraphs/users/deploy/environments/prod.yaml +++ /dev/null @@ -1,6 +0,0 @@ -env: - - name: OTEL_HTTP_ENDPOINT - value: http://collector.monitoring:4318/v1/traces -requests: - cpu: 500m - memory: 512Mi diff --git a/subgraphs/users/deploy/templates/NOTES.txt b/subgraphs/users/deploy/templates/NOTES.txt deleted file mode 100644 index 026fa7b..0000000 --- a/subgraphs/users/deploy/templates/NOTES.txt +++ /dev/null @@ -1,22 +0,0 @@ -1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "subgraph.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "subgraph.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "subgraph.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "subgraph.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT -{{- end }} diff --git a/subgraphs/users/deploy/templates/_helpers.tpl b/subgraphs/users/deploy/templates/_helpers.tpl deleted file mode 100644 index f789130..0000000 --- a/subgraphs/users/deploy/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "subgraph.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "subgraph.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "subgraph.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "subgraph.labels" -}} -helm.sh/chart: {{ include "subgraph.chart" . }} -{{ include "subgraph.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "subgraph.selectorLabels" -}} -app.kubernetes.io/name: {{ include "subgraph.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "subgraph.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "subgraph.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/subgraphs/users/deploy/templates/deployment.yaml b/subgraphs/users/deploy/templates/deployment.yaml deleted file mode 100644 index 7ff589d..0000000 --- a/subgraphs/users/deploy/templates/deployment.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "subgraph.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "subgraph.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "subgraph.serviceAccountName" . 
}} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: 4001 - protocol: TCP - {{- with .Values.env }} - env: - {{ toYaml . | nindent 12 }} - {{- end }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/subgraphs/users/deploy/templates/hpa.yaml b/subgraphs/users/deploy/templates/hpa.yaml deleted file mode 100644 index 1b87527..0000000 --- a/subgraphs/users/deploy/templates/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "subgraph-a.fullname" . }} - labels: - {{- include "subgraph-a.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "subgraph-a.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/subgraphs/users/deploy/templates/ingress.yaml b/subgraphs/users/deploy/templates/ingress.yaml deleted file mode 100644 index d92a8ae..0000000 --- a/subgraphs/users/deploy/templates/ingress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "subgraph.fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.ingress.className }} - {{- end }} - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . 
| quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/subgraphs/users/deploy/templates/service.yaml b/subgraphs/users/deploy/templates/service.yaml deleted file mode 100644 index 20e15b8..0000000 --- a/subgraphs/users/deploy/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "subgraph.fullname" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "subgraph.selectorLabels" . | nindent 4 }} diff --git a/subgraphs/users/deploy/templates/serviceaccount.yaml b/subgraphs/users/deploy/templates/serviceaccount.yaml deleted file mode 100644 index 61e85b4..0000000 --- a/subgraphs/users/deploy/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "subgraph.serviceAccountName" . }} - labels: - {{- include "subgraph.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/subgraphs/users/deploy/templates/tests/test-connection.yaml b/subgraphs/users/deploy/templates/tests/test-connection.yaml deleted file mode 100644 index c86ef2c..0000000 --- a/subgraphs/users/deploy/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "subgraph.fullname" . }}-test-connection" - labels: - {{- include "subgraph.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "subgraph.fullname" . 
}}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/subgraphs/users/deploy/values.yaml b/subgraphs/users/deploy/values.yaml deleted file mode 100644 index cd0958c..0000000 --- a/subgraphs/users/deploy/values.yaml +++ /dev/null @@ -1,43 +0,0 @@ -replicaCount: 3 - -image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout - pullPolicy: Always - tag: main - -nameOverride: graphql -fullnameOverride: graphql - -serviceAccount: - create: false - -podAnnotations: {} - -securityContext: {} - -podSecurityContext: {} - -service: - type: ClusterIP - port: 4001 - -ingress: - enabled: false - className: "" - annotations: - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: checkout.local - paths: - - path: / - pathType: Prefix - tls: [] - -resources: {} - -autoscaling: - enabled: false - targetCPUUtilizationPercentage: 80 - minReplicas: 1 - maxReplicas: 5 diff --git a/subgraphs/users/k8s/subgraph-dev.yaml b/subgraphs/users/k8s/subgraph-dev.yaml deleted file mode 100644 index 2d63703..0000000 --- a/subgraphs/users/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: users - namespace: users - labels: - app: users - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.users.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/users:main - path: /app/schema.graphql diff --git a/subgraphs/users/k8s/subgraph-prod.yaml b/subgraphs/users/k8s/subgraph-prod.yaml deleted file mode 100644 index 2d63703..0000000 --- a/subgraphs/users/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: users - namespace: users - labels: - app: users - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.users.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/users:main - path: /app/schema.graphql diff --git a/terraform/aws/create_graph.sh b/terraform/aws/create_graph.sh index 9631020..62c108f 100755 --- a/terraform/aws/create_graph.sh +++ b/terraform/aws/create_graph.sh @@ -79,70 +79,22 @@ fi GRAPH_KEY=$(echo $CREATE_RESP | jq -r ".data.newService.apiKeys[0].token") -# Create Operator API key for the operator to use -echo "Creating Operator API key..." - -CREATE_OPERATOR_KEY_ARGS=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header "apollographql-client-name: reference-architecture" - --header "apollographql-client-version: 1.0" - --header 'content-type: application/json' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreateOperatorKey(\$name: String!, \$type: GraphOsKeyType!, \$organizationId: ID!) { organization(id: \$organizationId) { createKey(name: \$name, type: \$type) { id keyName expiresAt token } } }\",\"variables\":{\"name\":\"operator\",\"type\":\"OPERATOR\",\"organizationId\":\"$ACCOUNT_ID\"}}" -) - -CREATE_OPERATOR_KEY_RESP=$(curl "${CREATE_OPERATOR_KEY_ARGS[@]}") - -OPERATOR_KEY=$(echo $CREATE_OPERATOR_KEY_RESP | jq -r ".data.organization.createKey.token") -if [[ "$OPERATOR_KEY" == "null" ]]; then - echo "Error creating operator key" - echo $CREATE_OPERATOR_KEY_RESP | jq . - exit 1 -fi - -echo "Operator key created successfully" - -# Note: Subgraph schema publishing is now handled by the Apollo GraphOS Operator -# when Subgraph CRDs are deployed. No manual rover publish commands needed. 
-# We create variants by publishing dummy subgraphs to them. - -echo "Creating dev and prod variants by publishing dummy subgraphs..." - for variant in "${VARIANTS[@]}"; do - echo "Creating variant: $variant" - - PUBLISH_ARGS=( - --silent - --header "x-api-key: $GRAPH_KEY" - --header "apollographql-client-name: reference-architecture" - --header "apollographql-client-version: 1.0" - --header 'content-type: application/json' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation PublishSubgraph(\$graphId: ID!, \$graphVariant: String!, \$name: String!, \$revision: String!, \$activePartialSchema: PartialSchemaInput!, \$url: String) { graph(id: \$graphId) { publishSubgraph(graphVariant: \$graphVariant, name: \$name, revision: \$revision, activePartialSchema: \$activePartialSchema, url: \$url) { subgraphsCreated errors { message locations { column line } code } wasCreated wasUpdated } } }\",\"variables\":{\"graphId\":\"$GRAPH_ID\",\"graphVariant\":\"$variant\",\"name\":\"temp-subgraph\",\"revision\":\"1\",\"activePartialSchema\":{\"sdl\":\"type Query { temp: String }\"},\"url\":\"http://localhost:1234\"}}" - ) - - PUBLISH_RESP=$(curl "${PUBLISH_ARGS[@]}") - - if [[ $(echo $PUBLISH_RESP | jq -r ".data.graph.publishSubgraph.errors | length") > 0 ]]; then - echo "Error creating variant $variant" - echo $PUBLISH_RESP | jq . - exit 1 - fi - - echo "Created variant: $variant" + for folder in ../../subgraphs/*; do + if [[ $folder == *"node_modules"* ]]; then + continue + fi + rover subgraph publish $GRAPH_ID@$variant --name $(basename $folder) --routing-url http://graphql.$(basename $folder).svc.cluster.local:4001 --schema $folder/schema.graphql --client-timeout 120 + done done -# Create persisted query lists for dev and prod # dev CREATE_PQ_ARGS_DEV=( --silent --header "x-api-key: $APOLLO_KEY" --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!, \$linkedVariants: [String!]) {\n graph(id: \$graphId) {\n createPersistedQueryList(name: \$name, linkedVariants: \$linkedVariants) {\n ... on CreatePersistedQueryListResult {\n persistedQueryList {\n id\n }\n }\n }\n }\n}\",\"variables\":{\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\",\"linkedVariants\":[\"$GRAPH_ID@dev\"]}}" ) if [[ $HEADER != "" ]]; then @@ -164,11 +116,9 @@ UPDATE_DEV_PQ_LIST_ARGS=( --silent --request POST --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... 
on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$DEV_PQ_ID\",\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" + --header 'content-type: application/json' + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) {\\n graph(id: \$graphId) {\\n variant(name: \$name) {\\n linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) {\\n __typename ... on LinkPersistedQueryListResult {\\n persistedQueryList {\\n id\\n }\\n }\\n }\\n }\\n }\\n}\",\"variables\":{\"persistedQueryListId\":\"$DEV_PQ_ID\",\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" ) if [[ $HEADER != "" ]]; then @@ -177,10 +127,10 @@ fi UPDATE_DEV_PQ_LIST_RESP=$(curl "${UPDATE_DEV_PQ_LIST_ARGS[@]}") -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_DEV_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for dev" +IS_SUCCESS=$(echo $UPDATE_DEV_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.persistedQueryList") +if [[ "$IS_SUCCESS" == "null" ]]; then + echo "Error updating pq list for dev" + echo ${UPDATE_DEV_PQ_LIST_ARGS[@]} echo $UPDATE_DEV_PQ_LIST_RESP | jq . exit 1 fi @@ -190,10 +140,8 @@ CREATE_PQ_ARGS_PROD=( --silent --header "x-api-key: $APOLLO_KEY" --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!, \$linkedVariants: [String!]) {\n graph(id: \$graphId) {\n createPersistedQueryList(name: \$name, linkedVariants: \$linkedVariants) {\n ... on CreatePersistedQueryListResult {\n persistedQueryList {\n id\n }\n }\n }\n }\n}\",\"variables\":{\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\",\"linkedVariants\":[\"prod\"]}}" ) if [[ $HEADER != "" ]]; then @@ -214,11 +162,9 @@ UPDATE_PROD_PQ_LIST_ARGS=( --silent --request POST --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$PROD_PQ_ID\",\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" + --header 'content-type: application/json' + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) 
{\\n graph(id: \$graphId) {\\n variant(name: \$name) {\\n linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) {\\n ... on LinkPersistedQueryListResult {\\n persistedQueryList {\\n id\\n }\\n }\\n }\\n }\\n }\\n}\",\"variables\":{\"persistedQueryListId\":\"$PROD_PQ_ID\",\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" ) if [[ $HEADER != "" ]]; then UPDATE_PROD_PQ_LIST_ARGS+=(--header "$HEADER") @@ -226,11 +172,10 @@ fi UPDATE_PROD_PQ_LIST_RESP=$(curl "${UPDATE_PROD_PQ_LIST_ARGS[@]}") -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_PROD_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for prod" - echo $UPDATE_PROD_PQ_LIST_RESP | jq . +IS_SUCCESS=$(echo $UPDATE_PROD_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.persistedQueryList") +if [[ "$IS_SUCCESS" == "null" ]]; then + echo "Error updating pq list for prod" + echo $CREATE_PQ_PROD_RESP | jq . exit 1 fi @@ -241,7 +186,5 @@ echo "export TF_VAR_apollo_key=\"$GRAPH_KEY\"" >> .env echo "export TF_VAR_apollo_graph_id=\"$GRAPH_ID\"" >> .env echo "export TF_VAR_pq_dev_id=\"$DEV_PQ_ID\"" >> .env echo "export TF_VAR_pq_prod_id=\"$PROD_PQ_ID\"" >> .env -echo "export OPERATOR_KEY=\"$OPERATOR_KEY\"" >> .env -echo "export GITHUB_ORG=\"$(git remote get-url origin 2>/dev/null | sed -E 's|.*github.com/([^/]+)/.*|\1|' || echo 'apollosolutions')\"" >> .env echo '' >> .env echo 'Re-run `source .env` to load them.' diff --git a/terraform/aws/setup_clusters.sh b/terraform/aws/setup_clusters.sh index 59a207f..0c4819c 100755 --- a/terraform/aws/setup_clusters.sh +++ b/terraform/aws/setup_clusters.sh @@ -3,12 +3,17 @@ set -euxo pipefail # default vars CLUSTER_PREFIX=${CLUSTER_PREFIX:-"apollo-supergraph-k8s"} -PROJECT_REGION=${PROJECT_REGION:-"us-east1"} +PROJECT_REGION=${PROJECT_REGION:-"us-east-1"} PROJECT_CLUSTERS=("${CLUSTER_PREFIX}-dev" "${CLUSTER_PREFIX}-prod") # end default vars -if [[ $(which gcloud) == "" ]]; then - echo "gcloud not installed" +if [[ $(which aws) == "" ]]; then + echo "aws not installed; please visit https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html" + exit 1 +fi + +if [[ $(which eksctl) == "" ]]; then + echo "eksctl not installed; please visit https://eksctl.io/introduction/?h=install#installation" exit 1 fi @@ -22,64 +27,44 @@ if [[ $(which kubectx) == "" ]]; then exit 1 fi -if [[ -z "$PROJECT_ID" ]]; then - echo "Must provide PROJECT_ID in environment" 1>&2 - exit 1 -fi +# get AWS account ID (numeric) +ACCOUNT_ID=`aws sts get-caller-identity --output text --query Account` + +curl -o iam_policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.4.4/docs/install/iam_policy.json +aws iam create-policy \ + --policy-name AWSLoadBalancerControllerIAMPolicy \ + --policy-document file://iam_policy.json || echo "" +curl -Lo v2_4_4_ingclass.yaml https://github.com/kubernetes-sigs/aws-load-balancer-controller/releases/download/v2.4.4/v2_4_4_ingclass.yaml environment_setup(){ echo "Configuring Kubeconfig for ${1}..." 
- gcloud container clusters get-credentials ${1} --zone ${PROJECT_REGION} --project ${PROJECT_ID} - - # short context aliases: supports `kubectx apollo-supergraph-k8s-dev` + # https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#eks-configure-kubectl + eksctl utils write-kubeconfig --cluster=${1} --region=${PROJECT_REGION} kubectx ${1}=. + # https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html + # install LB Controller + kubectl apply \ + --validate=false \ + -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml + sleep 15 - # monitoring setup: namespace, service account, and binding - # the service account name matches the otel collector's service account in its helm chart - kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f - - kubectl create serviceaccount -n "monitoring" "metrics-writer" --dry-run=client -o yaml | kubectl apply -f - - kubectl annotate serviceaccount -n "monitoring" "metrics-writer" "iam.gke.io/gcp-service-account=${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" --overwrite - gcloud iam service-accounts add-iam-policy-binding \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${PROJECT_ID}.svc.id.goog[monitoring/metrics-writer]" \ - "${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" - - # Apollo GraphOS Operator setup - echo "Installing Apollo GraphOS Operator..." - kubectl create namespace apollo-operator --dry-run=client -o yaml | kubectl apply -f - - kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - - - # Create operator API key secret (requires OPERATOR_KEY to be set) - if [[ -n "$OPERATOR_KEY" ]]; then - kubectl create secret generic apollo-api-key \ - --from-literal="APOLLO_KEY=$OPERATOR_KEY" \ - -n apollo-operator \ - --dry-run=client -o yaml | kubectl apply -f - - echo "Operator API key secret created" - else - echo "Warning: OPERATOR_KEY not set. Operator secret not created." - fi - - # Install operator using Helm - if [[ $(which helm) != "" ]]; then - helm upgrade --install --atomic apollo-operator \ - oci://registry-1.docker.io/apollograph/operator-chart \ - -n apollo-operator \ - --create-namespace \ - -f - < 0 ]]; then - echo "Error creating variant $variant" - echo $PUBLISH_RESP | jq . - exit 1 - fi - - echo "Created variant: $variant" + for folder in ../../subgraphs/*; do + if [[ $folder == *"node_modules"* ]]; then + continue + fi + rover subgraph publish $GRAPH_ID@$variant --name $(basename $folder) --routing-url http://graphql.$(basename $folder).svc.cluster.local:4001 --schema $folder/schema.graphql --client-timeout 120 + done done -# Create persisted query lists for dev and prod # dev CREATE_PQ_ARGS_DEV=( --silent --header "x-api-key: $APOLLO_KEY" --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... 
on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!, \$linkedVariants: [String!]) {\n graph(id: \$graphId) {\n createPersistedQueryList(name: \$name, linkedVariants: \$linkedVariants) {\n ... on CreatePersistedQueryListResult {\n persistedQueryList {\n id\n }\n }\n }\n }\n}\",\"variables\":{\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\",\"linkedVariants\":[\"$GRAPH_ID@dev\"]}}" ) if [[ $HEADER != "" ]]; then @@ -164,11 +116,9 @@ UPDATE_DEV_PQ_LIST_ARGS=( --silent --request POST --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$DEV_PQ_ID\",\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" + --header 'content-type: application/json' + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) {\\n graph(id: \$graphId) {\\n variant(name: \$name) {\\n linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) {\\n __typename ... on LinkPersistedQueryListResult {\\n persistedQueryList {\\n id\\n }\\n }\\n }\\n }\\n }\\n}\",\"variables\":{\"persistedQueryListId\":\"$DEV_PQ_ID\",\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" ) if [[ $HEADER != "" ]]; then @@ -177,10 +127,10 @@ fi UPDATE_DEV_PQ_LIST_RESP=$(curl "${UPDATE_DEV_PQ_LIST_ARGS[@]}") -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_DEV_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for dev" +IS_SUCCESS=$(echo $UPDATE_DEV_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.persistedQueryList") +if [[ "$IS_SUCCESS" == "null" ]]; then + echo "Error updating pq list for dev" + echo ${UPDATE_DEV_PQ_LIST_ARGS[@]} echo $UPDATE_DEV_PQ_LIST_RESP | jq . exit 1 fi @@ -190,10 +140,8 @@ CREATE_PQ_ARGS_PROD=( --silent --header "x-api-key: $APOLLO_KEY" --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... 
on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!, \$linkedVariants: [String!]) {\n graph(id: \$graphId) {\n createPersistedQueryList(name: \$name, linkedVariants: \$linkedVariants) {\n ... on CreatePersistedQueryListResult {\n persistedQueryList {\n id\n }\n }\n }\n }\n}\",\"variables\":{\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\",\"linkedVariants\":[\"prod\"]}}" ) if [[ $HEADER != "" ]]; then @@ -214,11 +162,9 @@ UPDATE_PROD_PQ_LIST_ARGS=( --silent --request POST --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$PROD_PQ_ID\",\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" + --header 'content-type: application/json' + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) {\\n graph(id: \$graphId) {\\n variant(name: \$name) {\\n linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) {\\n ... on LinkPersistedQueryListResult {\\n persistedQueryList {\\n id\\n }\\n }\\n }\\n }\\n }\\n}\",\"variables\":{\"persistedQueryListId\":\"$PROD_PQ_ID\",\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" ) if [[ $HEADER != "" ]]; then UPDATE_PROD_PQ_LIST_ARGS+=(--header "$HEADER") @@ -226,11 +172,10 @@ fi UPDATE_PROD_PQ_LIST_RESP=$(curl "${UPDATE_PROD_PQ_LIST_ARGS[@]}") -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_PROD_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for prod" - echo $UPDATE_PROD_PQ_LIST_RESP | jq . +IS_SUCCESS=$(echo $UPDATE_PROD_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.persistedQueryList") +if [[ "$IS_SUCCESS" == "null" ]]; then + echo "Error updating pq list for prod" + echo $CREATE_PQ_PROD_RESP | jq . exit 1 fi @@ -241,7 +186,5 @@ echo "export TF_VAR_apollo_key=\"$GRAPH_KEY\"" >> .env echo "export TF_VAR_apollo_graph_id=\"$GRAPH_ID\"" >> .env echo "export TF_VAR_pq_dev_id=\"$DEV_PQ_ID\"" >> .env echo "export TF_VAR_pq_prod_id=\"$PROD_PQ_ID\"" >> .env -echo "export OPERATOR_KEY=\"$OPERATOR_KEY\"" >> .env -echo "export GITHUB_ORG=\"$(git remote get-url origin 2>/dev/null | sed -E 's|.*github.com/([^/]+)/.*|\1|' || echo 'apollosolutions')\"" >> .env echo '' >> .env echo 'Re-run `source .env` to load them.' 
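For reference, one iteration of the publish loop added in the hunk above expands to a single Rover call per subgraph and variant; the graph ref and subgraph name below are illustrative placeholders, not values taken from this patch:

    # Publish the products subgraph schema to the dev variant of an example graph
    rover subgraph publish my-graph@dev \
      --name products \
      --routing-url http://graphql.products.svc.cluster.local:4001 \
      --schema ../../subgraphs/products/schema.graphql \
      --client-timeout 120
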
diff --git a/terraform/gcp/setup_clusters.sh b/terraform/gcp/setup_clusters.sh index 59a207f..db149ac 100755 --- a/terraform/gcp/setup_clusters.sh +++ b/terraform/gcp/setup_clusters.sh @@ -43,43 +43,6 @@ environment_setup(){ --role roles/iam.workloadIdentityUser \ --member "serviceAccount:${PROJECT_ID}.svc.id.goog[monitoring/metrics-writer]" \ "${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" - - # Apollo GraphOS Operator setup - echo "Installing Apollo GraphOS Operator..." - kubectl create namespace apollo-operator --dry-run=client -o yaml | kubectl apply -f - - kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - - - # Create operator API key secret (requires OPERATOR_KEY to be set) - if [[ -n "$OPERATOR_KEY" ]]; then - kubectl create secret generic apollo-api-key \ - --from-literal="APOLLO_KEY=$OPERATOR_KEY" \ - -n apollo-operator \ - --dry-run=client -o yaml | kubectl apply -f - - echo "Operator API key secret created" - else - echo "Warning: OPERATOR_KEY not set. Operator secret not created." - fi - - # Install operator using Helm - if [[ $(which helm) != "" ]]; then - helm upgrade --install --atomic apollo-operator \ - oci://registry-1.docker.io/apollograph/operator-chart \ - -n apollo-operator \ - --create-namespace \ - -f - < 0 ]]; then - echo "Error creating variant $variant" - echo $PUBLISH_RESP | jq . - exit 1 - fi - - echo "Created variant: $variant" + for folder in ../../subgraphs/*; do + if [[ $folder == *"node_modules"* ]]; then + continue + fi + rover subgraph publish $GRAPH_ID@$variant --name $(basename $folder) --routing-url http://graphql.$(basename $folder).svc.cluster.local:4001 --schema $folder/schema.graphql --client-timeout 120 + done done -# Create persisted query lists for dev and prod # dev CREATE_PQ_ARGS_DEV=( --silent --header "x-api-key: $APOLLO_KEY" --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!, \$linkedVariants: [String!]) {\n graph(id: \$graphId) {\n createPersistedQueryList(name: \$name, linkedVariants: \$linkedVariants) {\n ... on CreatePersistedQueryListResult {\n persistedQueryList {\n id\n }\n }\n }\n }\n}\",\"variables\":{\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\",\"linkedVariants\":[\"$GRAPH_ID@dev\"]}}" ) if [[ $HEADER != "" ]]; then @@ -164,11 +116,9 @@ UPDATE_DEV_PQ_LIST_ARGS=( --silent --request POST --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... 
on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$DEV_PQ_ID\",\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" + --header 'content-type: application/json' + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) {\\n graph(id: \$graphId) {\\n variant(name: \$name) {\\n linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) {\\n __typename ... on LinkPersistedQueryListResult {\\n persistedQueryList {\\n id\\n }\\n }\\n }\\n }\\n }\\n}\",\"variables\":{\"persistedQueryListId\":\"$DEV_PQ_ID\",\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" ) if [[ $HEADER != "" ]]; then @@ -177,10 +127,10 @@ fi UPDATE_DEV_PQ_LIST_RESP=$(curl "${UPDATE_DEV_PQ_LIST_ARGS[@]}") -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_DEV_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for dev" +IS_SUCCESS=$(echo $UPDATE_DEV_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.persistedQueryList") +if [[ "$IS_SUCCESS" == "null" ]]; then + echo "Error updating pq list for dev" + echo ${UPDATE_DEV_PQ_LIST_ARGS[@]} echo $UPDATE_DEV_PQ_LIST_RESP | jq . exit 1 fi @@ -190,10 +140,8 @@ CREATE_PQ_ARGS_PROD=( --silent --header "x-api-key: $APOLLO_KEY" --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!, \$linkedVariants: [String!]) {\n graph(id: \$graphId) {\n createPersistedQueryList(name: \$name, linkedVariants: \$linkedVariants) {\n ... on CreatePersistedQueryListResult {\n persistedQueryList {\n id\n }\n }\n }\n }\n}\",\"variables\":{\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\",\"linkedVariants\":[\"prod\"]}}" ) if [[ $HEADER != "" ]]; then @@ -214,11 +162,9 @@ UPDATE_PROD_PQ_LIST_ARGS=( --silent --request POST --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$PROD_PQ_ID\",\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" + --header 'content-type: application/json' + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) 
{\\n graph(id: \$graphId) {\\n variant(name: \$name) {\\n linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) {\\n ... on LinkPersistedQueryListResult {\\n persistedQueryList {\\n id\\n }\\n }\\n }\\n }\\n }\\n}\",\"variables\":{\"persistedQueryListId\":\"$PROD_PQ_ID\",\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" ) if [[ $HEADER != "" ]]; then UPDATE_PROD_PQ_LIST_ARGS+=(--header "$HEADER") @@ -226,11 +172,10 @@ fi UPDATE_PROD_PQ_LIST_RESP=$(curl "${UPDATE_PROD_PQ_LIST_ARGS[@]}") -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_PROD_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for prod" - echo $UPDATE_PROD_PQ_LIST_RESP | jq . +IS_SUCCESS=$(echo $UPDATE_PROD_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.persistedQueryList") +if [[ "$IS_SUCCESS" == "null" ]]; then + echo "Error updating pq list for prod" + echo $CREATE_PQ_PROD_RESP | jq . exit 1 fi @@ -241,7 +186,5 @@ echo "export TF_VAR_apollo_key=\"$GRAPH_KEY\"" >> .env echo "export TF_VAR_apollo_graph_id=\"$GRAPH_ID\"" >> .env echo "export TF_VAR_pq_dev_id=\"$DEV_PQ_ID\"" >> .env echo "export TF_VAR_pq_prod_id=\"$PROD_PQ_ID\"" >> .env -echo "export OPERATOR_KEY=\"$OPERATOR_KEY\"" >> .env -echo "export GITHUB_ORG=\"$(git remote get-url origin 2>/dev/null | sed -E 's|.*github.com/([^/]+)/.*|\1|' || echo 'apollosolutions')\"" >> .env echo '' >> .env echo 'Re-run `source .env` to load them.' diff --git a/terraform/minikube/setup_clusters.sh b/terraform/minikube/setup_clusters.sh index 59a207f..a63767d 100755 --- a/terraform/minikube/setup_clusters.sh +++ b/terraform/minikube/setup_clusters.sh @@ -1,87 +1,28 @@ #/bin/bash set -euxo pipefail -# default vars -CLUSTER_PREFIX=${CLUSTER_PREFIX:-"apollo-supergraph-k8s"} -PROJECT_REGION=${PROJECT_REGION:-"us-east1"} -PROJECT_CLUSTERS=("${CLUSTER_PREFIX}-dev" "${CLUSTER_PREFIX}-prod") -# end default vars - -if [[ $(which gcloud) == "" ]]; then - echo "gcloud not installed" - exit 1 -fi - if [[ $(which kubectl) == "" ]]; then echo "kubectl not installed" exit 1 fi -if [[ $(which kubectx) == "" ]]; then - echo "kubectx not installed" +if [[ $(which minikube) == "" ]]; then + echo "minikube not installed" exit 1 fi -if [[ -z "$PROJECT_ID" ]]; then - echo "Must provide PROJECT_ID in environment" 1>&2 +if [[ $(which kubectx) == "" ]]; then + echo "kubectx not installed" exit 1 fi environment_setup(){ - echo "Configuring Kubeconfig for ${1}..." - gcloud container clusters get-credentials ${1} --zone ${PROJECT_REGION} --project ${PROJECT_ID} - - # short context aliases: supports `kubectx apollo-supergraph-k8s-dev` - kubectx ${1}=. + echo "Configuring Kubeconfig for minikube..." 
+ minikube addons enable ingress + kubectx minikube - # monitoring setup: namespace, service account, and binding - # the service account name matches the otel collector's service account in its helm chart kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f - kubectl create serviceaccount -n "monitoring" "metrics-writer" --dry-run=client -o yaml | kubectl apply -f - - kubectl annotate serviceaccount -n "monitoring" "metrics-writer" "iam.gke.io/gcp-service-account=${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" --overwrite - gcloud iam service-accounts add-iam-policy-binding \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${PROJECT_ID}.svc.id.goog[monitoring/metrics-writer]" \ - "${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" - - # Apollo GraphOS Operator setup - echo "Installing Apollo GraphOS Operator..." - kubectl create namespace apollo-operator --dry-run=client -o yaml | kubectl apply -f - - kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - - - # Create operator API key secret (requires OPERATOR_KEY to be set) - if [[ -n "$OPERATOR_KEY" ]]; then - kubectl create secret generic apollo-api-key \ - --from-literal="APOLLO_KEY=$OPERATOR_KEY" \ - -n apollo-operator \ - --dry-run=client -o yaml | kubectl apply -f - - echo "Operator API key secret created" - else - echo "Warning: OPERATOR_KEY not set. Operator secret not created." - fi - - # Install operator using Helm - if [[ $(which helm) != "" ]]; then - helm upgrade --install --atomic apollo-operator \ - oci://registry-1.docker.io/apollograph/operator-chart \ - -n apollo-operator \ - --create-namespace \ - -f - < Date: Tue, 4 Nov 2025 15:31:33 -0800 Subject: [PATCH 02/31] Remove deprecated deployment workflows for AWS and GKE, and update manual deployment process to reflect that subgraph deployment is now handled by the operator. 
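
With the per-subgraph Helm deploy and Rover publish jobs removed, a quick post-merge sanity check (illustrative only; it mirrors the kubectl checks referenced in docs/setup.md later in this series, and the users namespace/name is just an example) is:

    # Confirm the operator has registered Subgraph resources in every namespace
    kubectl get subgraphs --all-namespaces
    # Inspect one subgraph's registration details
    kubectl describe subgraph users -n users
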
--- .github/workflows/_deploy-subgraphs-aws.yaml | 118 ---------------- .github/workflows/_deploy-subgraphs-gke.yaml | 131 ------------------ .github/workflows/_rover-subgraph-publish.yml | 43 ------ .github/workflows/manual-deploy.yaml | 50 +------ .github/workflows/merge-to-main.yaml | 47 +------ 5 files changed, 8 insertions(+), 381 deletions(-) delete mode 100644 .github/workflows/_deploy-subgraphs-aws.yaml delete mode 100644 .github/workflows/_deploy-subgraphs-gke.yaml delete mode 100644 .github/workflows/_rover-subgraph-publish.yml diff --git a/.github/workflows/_deploy-subgraphs-aws.yaml b/.github/workflows/_deploy-subgraphs-aws.yaml deleted file mode 100644 index 704fc76..0000000 --- a/.github/workflows/_deploy-subgraphs-aws.yaml +++ /dev/null @@ -1,118 +0,0 @@ -name: _deploy-subgraphs-aws - -on: - workflow_call: - inputs: - version: - type: string - default: main - required: true - app_name: - description: Name of the app in Helm charts - type: string - required: true - cluster_suffix: - description: Target AWS cluster suffix - type: string - required: true - dry-run: - type: boolean - description: Run a dry run with helm - required: false - default: false - debug: - type: boolean - description: Run helm in debug mode - required: false - default: false - region: - description: AWS target region - type: string - default: us-east-1 - required: false - secrets: - AWS_ACCESS_KEY: - required: true - AWS_SECRET_KEY: - required: true - CLUSTER_PREFIX: - required: true - -env: - IMAGE: ${{ github.repository }}/${{ inputs.app_name }} - REGISTRY: ghcr.io - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY }} - AWS_REGION: ${{ inputs.region }} - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: "Configure Kubeconfig w/ EKS" - run: aws eks update-kubeconfig --name ${{ secrets.CLUSTER_PREFIX }}-${{ inputs.cluster_suffix }} --region ${{ env.AWS_REGION }} - - # Deploy the Docker image to the GKE cluster with dry run - - name: Helm dry-run - if: ${{ inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade subgraph-${{ inputs.app_name }} \ - --create-namespace \ - --namespace ${{ inputs.app_name }} \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --set "ingress.hosts[0].host=${{ inputs.app_name }}.local,ingress.hosts[0].paths[0].path=/" \ - --dry-run \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/subgraph \ - --values ./deploy/subgraph/values.yaml \ - -f ./deploy/subgraph/environments/${{ inputs.cluster_suffix }}.yaml - - # Deploy the Docker image to the GKE cluster for real with debug - - name: Deploy - if: ${{ !inputs.dry-run && inputs.debug }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade subgraph-${{ inputs.app_name }} \ - --create-namespace \ - --namespace ${{ inputs.app_name }} \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --set "ingress.hosts[0].host=${{ inputs.app_name }}.local,ingress.hosts[0].paths[0].path=/" \ - --atomic \ - --install \ - --timeout 60s \ - --debug \ - ./deploy/subgraph \ - --values ./deploy/subgraph/values.yaml \ - -f ./deploy/subgraph/environments/${{ inputs.cluster_suffix }}.yaml - - # Deploy the Docker image to the GKE cluster for real - - name: Deploy - if: ${{ !inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' 
'[:lower:]') - - helm upgrade subgraph-${{ inputs.app_name }} \ - --create-namespace \ - --namespace ${{ inputs.app_name }} \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --set "ingress.hosts[0].host=${{ inputs.app_name }}.local,ingress.hosts[0].paths[0].path=/" \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/subgraph \ - --values ./deploy/subgraph/values.yaml \ - -f ./deploy/subgraph/environments/${{ inputs.cluster_suffix }}.yaml - diff --git a/.github/workflows/_deploy-subgraphs-gke.yaml b/.github/workflows/_deploy-subgraphs-gke.yaml deleted file mode 100644 index 6e18e34..0000000 --- a/.github/workflows/_deploy-subgraphs-gke.yaml +++ /dev/null @@ -1,131 +0,0 @@ -name: _deploy-subgraphs-gke - -# Started from GH Docs -# https://docs.github.com/en/actions/deployment/deploying-to-your-cloud-provider/deploying-to-google-kubernetes-engine - -on: - workflow_call: - inputs: - version: - type: string - default: main - required: true - app_name: - description: Name of the app in Helm charts - type: string - required: true - cluster_suffix: - description: Target GKE cluster suffix - type: string - required: true - dry-run: - type: boolean - description: Run a dry run with helm - required: false - default: false - debug: - type: boolean - description: Run helm in debug mode - required: false - default: false - secrets: - GCP_CREDENTIALS: - required: true - CLUSTER_PREFIX: - required: true - -env: - IMAGE: ${{ github.repository }}/${{ inputs.app_name }} - GKE_ZONE: us-east1 - REGISTRY: ghcr.io - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - id: "auth" - uses: "google-github-actions/auth@v0" - with: - credentials_json: ${{ secrets.GCP_CREDENTIALS }} - - - name: "Set up Cloud SDK" - uses: google-github-actions/setup-gcloud@v0 - - - name: "Use gcloud CLI" - run: gcloud info - - # Configure Docker to use the gcloud command-line tool as a credential - # helper for authentication - - run: |- - gcloud --quiet auth configure-docker - - # Get the GKE credentials so we can deploy to the cluster - - uses: google-github-actions/get-gke-credentials@fb08709ba27618c31c09e014e1d8364b02e5042e - with: - cluster_name: ${{ secrets.CLUSTER_PREFIX }}-${{ inputs.cluster_suffix }} - location: ${{ env.GKE_ZONE }} - - # Deploy the Docker image to the GKE cluster with dry run - # Deploy the Docker image to the GKE cluster with dry run - - name: Helm dry-run - if: ${{ inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade subgraph-${{ inputs.app_name }} \ - --create-namespace \ - --namespace ${{ inputs.app_name }} \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --set "ingress.hosts[0].host=${{ inputs.app_name }}.local,ingress.hosts[0].paths[0].path=/" \ - --dry-run \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/subgraph \ - --values ./deploy/subgraph/values.yaml \ - -f ./deploy/subgraph/environments/${{ inputs.cluster_suffix }}.yaml - - # Deploy the Docker image to the GKE cluster for real with debug - - name: Deploy - if: ${{ !inputs.dry-run && inputs.debug }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade subgraph-${{ inputs.app_name }} \ - --create-namespace \ - --namespace ${{ inputs.app_name }} \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --set "ingress.hosts[0].host=${{ inputs.app_name 
}}.local,ingress.hosts[0].paths[0].path=/" \ - --atomic \ - --install \ - --timeout 60s \ - --debug \ - ./deploy/subgraph \ - --values ./deploy/subgraph/values.yaml \ - -f ./deploy/subgraph/environments/${{ inputs.cluster_suffix }}.yaml - - # Deploy the Docker image to the GKE cluster for real - - name: Deploy - if: ${{ !inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade subgraph-${{ inputs.app_name }} \ - --create-namespace \ - --namespace ${{ inputs.app_name }} \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --set "ingress.hosts[0].host=${{ inputs.app_name }}.local,ingress.hosts[0].paths[0].path=/" \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/subgraph \ - --values ./deploy/subgraph/values.yaml \ - -f ./deploy/subgraph/environments/${{ inputs.cluster_suffix }}.yaml diff --git a/.github/workflows/_rover-subgraph-publish.yml b/.github/workflows/_rover-subgraph-publish.yml deleted file mode 100644 index cf2ea22..0000000 --- a/.github/workflows/_rover-subgraph-publish.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: _rover-subgraph-publish - -on: - workflow_call: - inputs: - subgraph_name: - description: Subgraph name in Studio - type: string - required: true - variant: - description: Apollo Studio variant - type: string - required: true - secrets: - APOLLO_KEY: - required: true - APOLLO_GRAPH_ID: - required: true - -env: - APOLLO_VCS_COMMIT: ${{ github.event.pull_request.head.sha }} - -jobs: - publish: - name: Rover Subgraph Publish - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Install Rover - uses: apollographql-gh-actions/install-rover@v1 - - - name: Rover Subgraph Publish - uses: apollographql-gh-actions/rover-subgraph-publish@v1 - with: - apollo-key: ${{ secrets.APOLLO_KEY }} - graph-ref: ${{ secrets.APOLLO_GRAPH_ID }}@${{ inputs.variant }} - name: ${{ inputs.subgraph_name }} - schema: ./subgraphs/${{inputs.subgraph_name}}/schema.graphql - routing-url: http://graphql.${{ inputs.subgraph_name }}.svc.cluster.local:4001 - diff --git a/.github/workflows/manual-deploy.yaml b/.github/workflows/manual-deploy.yaml index ad30392..dc805a7 100644 --- a/.github/workflows/manual-deploy.yaml +++ b/.github/workflows/manual-deploy.yaml @@ -41,56 +41,12 @@ jobs: uses: ./.github/workflows/_determine-provider.yaml secrets: inherit - deploy_gcp: - if: needs.determine_cloud_provider.outputs.gcp == 'true' - needs: [determine_cloud_provider] - uses: ./.github/workflows/_deploy-subgraphs-gke.yaml - secrets: inherit - strategy: - matrix: - subgraph: [checkout, discovery, inventory, orders, products, reviews, shipping, users] - with: - version: ${{ inputs.version }} - app_name: ${{ matrix.subgraph }} - cluster_suffix: ${{ inputs.environment }} - dry-run: ${{ inputs.dry-run }} - debug: ${{ inputs.debug }} - - deploy_aws: - if: needs.determine_cloud_provider.outputs.aws == 'true' - needs: [determine_cloud_provider] - uses: ./.github/workflows/_deploy-subgraphs-aws.yaml - secrets: inherit - strategy: - matrix: - subgraph: [checkout, discovery, inventory, orders, products, reviews, shipping, users] - with: - version: ${{ inputs.version }} - app_name: ${{ matrix.subgraph }} - cluster_suffix: ${{ inputs.environment }} - dry-run: ${{ inputs.dry-run }} - debug: ${{ inputs.debug }} - - publish: - needs: [deploy_aws, deploy_gcp, determine_cloud_provider] - strategy: - matrix: - subgraph: [checkout, discovery, inventory, orders, products, reviews, shipping, users] - if: 
always() && - (needs.deploy_aws.result == 'success' || needs.deploy_aws.result == 'skipped') && - (needs.deploy_gcp.result == 'success' || needs.deploy_gcp.result == 'skipped') && - (needs.determine_cloud_provider.outputs.apollo == 'true') - uses: ./.github/workflows/_rover-subgraph-publish.yml - secrets: inherit - with: - subgraph_name: ${{ matrix.subgraph }} # change to subgraph-b in that repo - variant: ${{ inputs.environment }} + # Subgraph deployment is now handled by the operator + # No manual deployment or publishing needed publish_pqs: - needs: [deploy_aws, deploy_gcp, determine_cloud_provider] + needs: [determine_cloud_provider] if: always() && - (needs.deploy_aws.result == 'success' || needs.deploy_aws.result == 'skipped') && - (needs.deploy_gcp.result == 'success' || needs.deploy_gcp.result == 'skipped') && (needs.determine_cloud_provider.outputs.apollo == 'true') uses: ./.github/workflows/_rover-client-pq-publish.yml secrets: inherit diff --git a/.github/workflows/merge-to-main.yaml b/.github/workflows/merge-to-main.yaml index 8c6a57d..ea9b601 100644 --- a/.github/workflows/merge-to-main.yaml +++ b/.github/workflows/merge-to-main.yaml @@ -28,7 +28,7 @@ jobs: with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + password: ${{ secrets.GHCR_TOKEN || secrets.GITHUB_TOKEN }} - name: Extract metadata (tags, labels) for Docker id: meta @@ -65,7 +65,7 @@ jobs: with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + password: ${{ secrets.GHCR_TOKEN || secrets.GITHUB_TOKEN }} - name: Extract metadata (tags, labels) for Docker id: meta @@ -105,7 +105,7 @@ jobs: with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + password: ${{ secrets.GHCR_TOKEN || secrets.GITHUB_TOKEN }} - name: Extract metadata (tags, labels) for Docker id: meta @@ -126,45 +126,8 @@ jobs: tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - deploy_gcp: - if: needs.determine_cloud_provider.outputs.gcp == 'true' - needs: [determine_cloud_provider, build-and-push-subgraph-image, build-and-push-client-image] - uses: ./.github/workflows/_deploy-subgraphs-gke.yaml - secrets: inherit - strategy: - matrix: - subgraph: [checkout, discovery, inventory, orders, products, reviews, shipping, users] - with: - version: main - app_name: ${{ matrix.subgraph }} - cluster_suffix: dev - - deploy_aws: - if: needs.determine_cloud_provider.outputs.aws == 'true' - needs: [determine_cloud_provider, build-and-push-subgraph-image, build-and-push-client-image] - uses: ./.github/workflows/_deploy-subgraphs-aws.yaml - secrets: inherit - strategy: - matrix: - subgraph: [checkout, discovery, inventory, orders, products, reviews, shipping, users] - with: - version: main - app_name: ${{ matrix.subgraph }} - cluster_suffix: dev - region: us-east-1 - - publish: - needs: [determine_cloud_provider] - uses: ./.github/workflows/_rover-subgraph-publish.yml - secrets: inherit - strategy: - matrix: - subgraph: [checkout, discovery, inventory, orders, products, reviews, shipping, users] - if: always() && - (needs.determine_cloud_provider.outputs.apollo == 'true') - with: - subgraph_name: ${{ matrix.subgraph }} # change to subgraph-b in that repo - variant: dev + # Subgraph deployment is now handled by the operator + # No manual deployment or publishing needed publish_pqs: uses: ./.github/workflows/_rover-client-pq-publish.yml From b5e81c9080d3f86c5da7f423747d27ecafe2ef06 Mon Sep 
17 00:00:00 2001 From: "andy.garcia" Date: Tue, 4 Nov 2025 15:39:06 -0800 Subject: [PATCH 03/31] Enhance account selection process in create_graph.sh. If multiple accounts are found, prompt the user to select one from a numbered list, ensuring valid input before proceeding. --- terraform/gcp/create_graph.sh | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/terraform/gcp/create_graph.sh b/terraform/gcp/create_graph.sh index 9631020..947768a 100755 --- a/terraform/gcp/create_graph.sh +++ b/terraform/gcp/create_graph.sh @@ -41,16 +41,36 @@ if [[ $ACCOUNT_ID == "" ]]; then ACCOUNT_RESP=$(curl "${ACCOUNT_ARGS[@]}") ACCOUNT_COUNT=$(echo $ACCOUNT_RESP | jq -r ".data.me.memberships | length") - # if more than one account exists, exit early + # if more than one account exists, ask user to choose if [[ $ACCOUNT_COUNT > 1 ]]; then echo "Apollo Studio returned more than one account." - echo "Specify an account ID with ACCOUNT_ID=myaccount $0" - echo "Accounts: " - echo $(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[].account.id") - exit 1 + echo "Please select an account to use:" + echo "" + + # Store account IDs in array + ACCOUNT_IDS=($(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[].account.id")) + + # Display numbered list of accounts + INDEX=1 + for account_id in "${ACCOUNT_IDS[@]}"; do + echo " $INDEX) $account_id" + ((INDEX++)) + done + + echo "" + read -p "Enter the number of the account to use (1-$ACCOUNT_COUNT): " SELECTION + + # Validate selection + if [[ ! "$SELECTION" =~ ^[0-9]+$ ]] || [[ "$SELECTION" -lt 1 ]] || [[ "$SELECTION" -gt $ACCOUNT_COUNT ]]; then + echo "Invalid selection. Please run the script again and choose a valid number." + exit 1 + fi + + ACCOUNT_ID=${ACCOUNT_IDS[$((SELECTION - 1))]} + echo "Selected account: $ACCOUNT_ID" + else + ACCOUNT_ID=$(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[0].account.id") fi - - ACCOUNT_ID=$(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[0].account.id") fi echo "Creating graph $GRAPH_ID on account $ACCOUNT_ID..." From 20f6d37fc1a9929aa8c1b397f17ef012a0af9fdb Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Tue, 4 Nov 2025 16:03:15 -0800 Subject: [PATCH 04/31] Remove composition status checks from setup documentation and add schema.graphql file to Dockerfiles for all subgraphs. --- docs/setup.md | 4 ---- subgraphs/checkout/Dockerfile | 1 + subgraphs/discovery/Dockerfile | 1 + subgraphs/inventory/Dockerfile | 1 + subgraphs/orders/Dockerfile | 1 + subgraphs/products/Dockerfile | 1 + subgraphs/reviews/Dockerfile | 1 + subgraphs/shipping/Dockerfile | 1 + subgraphs/users/Dockerfile | 1 + 9 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/setup.md b/docs/setup.md index f115d5f..2e42458 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -299,8 +299,6 @@ The operator will automatically publish schemas to GraphOS and trigger compositi # Check if subgraphs are registered kubectl get subgraphs --all-namespaces -# Check composition status -kubectl describe supergraphschemas reference-architecture-dev -n apollo ``` You can try out a subgraph using port forwarding: @@ -332,8 +330,6 @@ Monitor the deployment: # Check if subgraphs are registered kubectl get subgraphs --all-namespaces -# Check composition status -kubectl describe supergraphschemas reference-architecture-prod -n apollo ``` You've successfully deployed your subgraphs! The next step is to deploy the Apollo Router and Coprocessor. 
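
With the `kubectl describe supergraphschemas` checks dropped from the docs above and `schema.graphql` now copied into every subgraph image (see the Dockerfile diffs that follow), a rough sanity check could look like the sketch below. The `checkout` resource/namespace name and the image reference are illustrative assumptions, not taken from this patch.

```sh
# Confirm the operator has registered the subgraphs (command kept in docs/setup.md)
kubectl get subgraphs --all-namespaces

# Inspect a single Subgraph resource; "checkout" is an assumed resource name
kubectl describe subgraph checkout -n checkout

# Check that the schema file now ships in the image at the path the Subgraph
# resources reference (/app/schema.graphql); the image reference is an example
docker run --rm --entrypoint cat \
  ghcr.io/${GITHUB_ORG}/reference-architecture/checkout:main /app/schema.graphql | head
```
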
diff --git a/subgraphs/checkout/Dockerfile b/subgraphs/checkout/Dockerfile index 8e5f473..dc42026 100644 --- a/subgraphs/checkout/Dockerfile +++ b/subgraphs/checkout/Dockerfile @@ -10,6 +10,7 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build diff --git a/subgraphs/discovery/Dockerfile b/subgraphs/discovery/Dockerfile index 8e5f473..dc42026 100644 --- a/subgraphs/discovery/Dockerfile +++ b/subgraphs/discovery/Dockerfile @@ -10,6 +10,7 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build diff --git a/subgraphs/inventory/Dockerfile b/subgraphs/inventory/Dockerfile index 8e5f473..dc42026 100644 --- a/subgraphs/inventory/Dockerfile +++ b/subgraphs/inventory/Dockerfile @@ -10,6 +10,7 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build diff --git a/subgraphs/orders/Dockerfile b/subgraphs/orders/Dockerfile index 8e5f473..dc42026 100644 --- a/subgraphs/orders/Dockerfile +++ b/subgraphs/orders/Dockerfile @@ -10,6 +10,7 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build diff --git a/subgraphs/products/Dockerfile b/subgraphs/products/Dockerfile index 8e5f473..dc42026 100644 --- a/subgraphs/products/Dockerfile +++ b/subgraphs/products/Dockerfile @@ -10,6 +10,7 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build diff --git a/subgraphs/reviews/Dockerfile b/subgraphs/reviews/Dockerfile index 8e5f473..dc42026 100644 --- a/subgraphs/reviews/Dockerfile +++ b/subgraphs/reviews/Dockerfile @@ -10,6 +10,7 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build diff --git a/subgraphs/shipping/Dockerfile b/subgraphs/shipping/Dockerfile index 8e5f473..dc42026 100644 --- a/subgraphs/shipping/Dockerfile +++ b/subgraphs/shipping/Dockerfile @@ -10,6 +10,7 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build diff --git a/subgraphs/users/Dockerfile b/subgraphs/users/Dockerfile index 8e5f473..dc42026 100644 --- a/subgraphs/users/Dockerfile +++ b/subgraphs/users/Dockerfile @@ -10,6 +10,7 @@ RUN npm install --no-save COPY src ./src COPY tsconfig.json . COPY codegen.ts . +COPY schema.graphql . RUN npm run build From 0ac251436b988f12df966a04989b09c80ef20ba4 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Thu, 6 Nov 2025 12:05:18 -0800 Subject: [PATCH 05/31] Update subgraph configurations to use the implemented reference architecture and add image pull secrets for GitHub Container Registry. Adjusted image repository paths and included necessary changes in deployment files across all subgraphs. 
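
The pull secret referenced by the new `imagePullSecrets` entries is provisioned by the `setup_clusters.sh` changes later in this patch; a minimal manual equivalent (shown for the `default` namespace only, with both variables exported from `.env`) would be roughly:

```sh
# Sketch of the ghcr-secret that setup_clusters.sh creates automatically.
# GITHUB_ORG is the plain username/org name and TF_VAR_github_token a token
# able to pull packages (the required scope is an assumption, not stated here).
kubectl create secret docker-registry ghcr-secret \
  --docker-server=ghcr.io \
  --docker-username="$GITHUB_ORG" \
  --docker-password="$TF_VAR_github_token" \
  --namespace=default \
  --dry-run=client -o yaml | kubectl apply -f -
```
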
--- deploy/client/values.yaml | 6 ++- deploy/coprocessor/values.yaml | 6 ++- deploy/operator-resources/supergraph-dev.yaml | 3 +- .../operator-resources/supergraph-prod.yaml | 1 - docs/setup.md | 42 ++++++++++++++----- subgraphs/checkout/deploy/values.yaml | 7 +++- subgraphs/checkout/k8s/subgraph-dev.yaml | 2 +- subgraphs/checkout/k8s/subgraph-prod.yaml | 2 +- subgraphs/discovery/deploy/values.yaml | 7 +++- subgraphs/discovery/k8s/subgraph-dev.yaml | 2 +- subgraphs/discovery/k8s/subgraph-prod.yaml | 2 +- subgraphs/inventory/deploy/values.yaml | 7 +++- subgraphs/inventory/k8s/subgraph-dev.yaml | 2 +- subgraphs/inventory/k8s/subgraph-prod.yaml | 2 +- subgraphs/orders/deploy/values.yaml | 7 +++- subgraphs/orders/k8s/subgraph-dev.yaml | 2 +- subgraphs/orders/k8s/subgraph-prod.yaml | 2 +- subgraphs/products/deploy/values.yaml | 7 +++- subgraphs/products/k8s/subgraph-dev.yaml | 2 +- subgraphs/products/k8s/subgraph-prod.yaml | 2 +- subgraphs/reviews/deploy/values.yaml | 7 +++- subgraphs/reviews/k8s/subgraph-dev.yaml | 2 +- subgraphs/reviews/k8s/subgraph-prod.yaml | 2 +- subgraphs/shipping/deploy/values.yaml | 7 +++- subgraphs/shipping/k8s/subgraph-dev.yaml | 2 +- subgraphs/shipping/k8s/subgraph-prod.yaml | 2 +- subgraphs/users/Dockerfile | 1 + subgraphs/users/deploy/values.yaml | 7 +++- subgraphs/users/k8s/subgraph-dev.yaml | 2 +- subgraphs/users/k8s/subgraph-prod.yaml | 2 +- terraform/aws/create_graph.sh | 1 - terraform/aws/setup_clusters.sh | 36 ++++++++++++++++ terraform/gcp/create_graph.sh | 1 - terraform/gcp/github.tf | 6 +-- terraform/gcp/setup_clusters.sh | 36 ++++++++++++++++ terraform/minikube/create_graph.sh | 1 - terraform/minikube/setup_clusters.sh | 36 ++++++++++++++++ 37 files changed, 220 insertions(+), 44 deletions(-) diff --git a/deploy/client/values.yaml b/deploy/client/values.yaml index 40a3c89..48ab896 100644 --- a/deploy/client/values.yaml +++ b/deploy/client/values.yaml @@ -1,7 +1,7 @@ replicaCount: 3 image: - repository: ghcr.io/${GITHUB_ORG}/reference-architecture/client + repository: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/client pullPolicy: Always tag: main @@ -11,6 +11,10 @@ fullnameOverride: web serviceAccount: create: false +# Image pull secrets for GitHub Container Registry +imagePullSecrets: + - name: ghcr-secret + podAnnotations: {} securityContext: {} diff --git a/deploy/coprocessor/values.yaml b/deploy/coprocessor/values.yaml index b37c6a1..e640e57 100644 --- a/deploy/coprocessor/values.yaml +++ b/deploy/coprocessor/values.yaml @@ -3,7 +3,7 @@ namespace: apollo replicaCount: 3 image: - repository: ghcr.io/${GITHUB_ORG}/reference-architecture/coprocessor + repository: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/coprocessor pullPolicy: Always tag: main @@ -13,6 +13,10 @@ fullnameOverride: coprocessor serviceAccount: create: false +# Image pull secrets for GitHub Container Registry +imagePullSecrets: + - name: ghcr-secret + podAnnotations: {} securityContext: {} diff --git a/deploy/operator-resources/supergraph-dev.yaml b/deploy/operator-resources/supergraph-dev.yaml index effc65c..dac21e9 100644 --- a/deploy/operator-resources/supergraph-dev.yaml +++ b/deploy/operator-resources/supergraph-dev.yaml @@ -5,7 +5,6 @@ metadata: namespace: apollo spec: replicas: 1 - serviceName: router podTemplate: routerVersion: 1.37.0 resources: @@ -13,6 +12,8 @@ spec: cpu: 100m memory: 256Mi schema: + studio: + graphRef: my-graph@dev resource: name: reference-architecture-dev namespace: apollo diff --git a/deploy/operator-resources/supergraph-prod.yaml 
b/deploy/operator-resources/supergraph-prod.yaml index 19f0bbc..98eb7b8 100644 --- a/deploy/operator-resources/supergraph-prod.yaml +++ b/deploy/operator-resources/supergraph-prod.yaml @@ -5,7 +5,6 @@ metadata: namespace: apollo spec: replicas: 3 - serviceName: router podTemplate: routerVersion: 1.37.0 resources: diff --git a/docs/setup.md b/docs/setup.md index 2e42458..a3c259a 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -127,7 +127,7 @@ Edit the new `.env` file: ```sh export PROJECT_ID="" # if using AWS, you will not see this line and can omit this export APOLLO_KEY="" -export GITHUB_ORG="" +export GITHUB_ORG="" # (not a git URL, just the username/org name) export TF_VAR_github_token="" ``` @@ -280,15 +280,27 @@ After this completes, you're ready to deploy your subgraphs! ### Deploy subgraphs to dev +**Note:** The image pull secret is automatically created by `setup_clusters.sh` if `TF_VAR_github_token` and `GITHUB_ORG` are set. `GITHUB_ORG` must be your GitHub username or organization name (e.g., `andywgarcia`), not a git URL. If you need to create it manually, use the commands below with your GitHub username and token. + Deploy the subgraph services and register them with the operator: ```sh -kubectx apollo-supergraph-k8s-dev - # Deploy each subgraph service for subgraph in checkout discovery inventory orders products reviews shipping users; do kubectl create namespace $subgraph --dry-run=client -o yaml | kubectl apply -f - - helm install $subgraph subgraphs/$subgraph/deploy -f subgraphs/$subgraph/deploy/environments/dev.yaml -n $subgraph + + # Copy the image pull secret to each namespace (if it exists) + if kubectl get secret ghcr-secret -n default &>/dev/null; then + kubectl get secret ghcr-secret -n default -o yaml | \ + sed 's/namespace: default/namespace: '"$subgraph"'/' | \ + kubectl apply -f - + fi + + # Install (imagePullSecrets are configured in values.yaml) + helm install $subgraph subgraphs/$subgraph/deploy \ + -f subgraphs/$subgraph/deploy/environments/dev.yaml \ + -n $subgraph + kubectl apply -f subgraphs/$subgraph/k8s/subgraph-dev.yaml done ``` @@ -311,15 +323,27 @@ Then visit [http://localhost:4001/](http://localhost:4001/). ### Deploy subgraphs to prod -Deploy the subgraphs to production using the same process: +**Note:** The image pull secret is automatically created by `setup_clusters.sh` if `TF_VAR_github_token` and `GITHUB_ORG` are set. `GITHUB_ORG` must be your GitHub username or organization name (e.g., `andywgarcia`), not a git URL. If you need to create it manually, use the commands below with your GitHub username and token. 
-```sh -kubectx apollo-supergraph-k8s-prod +Deploy the subgraphs to production: +```sh # Deploy each subgraph service for subgraph in checkout discovery inventory orders products reviews shipping users; do kubectl create namespace $subgraph --dry-run=client -o yaml | kubectl apply -f - - helm install $subgraph subgraphs/$subgraph/deploy -f subgraphs/$subgraph/deploy/environments/prod.yaml -n $subgraph + + # Copy the image pull secret to each namespace (if it exists) + if kubectl get secret ghcr-secret -n default &>/dev/null; then + kubectl get secret ghcr-secret -n default -o yaml | \ + sed 's/namespace: default/namespace: '"$subgraph"'/' | \ + kubectl apply -f - + fi + + # Install (imagePullSecrets are configured in values.yaml) + helm install $subgraph subgraphs/$subgraph/deploy \ + -f subgraphs/$subgraph/deploy/environments/prod.yaml \ + -n $subgraph + kubectl apply -f subgraphs/$subgraph/k8s/subgraph-prod.yaml done ``` @@ -345,7 +369,6 @@ kubectx apollo-supergraph-k8s-dev if command -v envsubst &> /dev/null; then envsubst < deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo else - # Fallback if envsubst not available sed "s|\${GITHUB_ORG}|${GITHUB_ORG:-apollosolutions}|g" deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo fi @@ -354,7 +377,6 @@ kubectx apollo-supergraph-k8s-prod if command -v envsubst &> /dev/null; then envsubst < deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo else - # Fallback if envsubst not available sed "s|\${GITHUB_ORG}|${GITHUB_ORG:-apollosolutions}|g" deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo fi ``` diff --git a/subgraphs/checkout/deploy/values.yaml b/subgraphs/checkout/deploy/values.yaml index cd0958c..c068f7a 100644 --- a/subgraphs/checkout/deploy/values.yaml +++ b/subgraphs/checkout/deploy/values.yaml @@ -1,7 +1,7 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout + repository: ghcr.io/andywgarcia/implemented-reference-architecture/checkout pullPolicy: Always tag: main @@ -11,6 +11,11 @@ fullnameOverride: graphql serviceAccount: create: false +# Image pull secrets for GitHub Container Registry +# Set this if your images are private and require authentication +imagePullSecrets: + - name: ghcr-secret + podAnnotations: {} securityContext: {} diff --git a/subgraphs/checkout/k8s/subgraph-dev.yaml b/subgraphs/checkout/k8s/subgraph-dev.yaml index a1404ec..70a6fa8 100644 --- a/subgraphs/checkout/k8s/subgraph-dev.yaml +++ b/subgraphs/checkout/k8s/subgraph-dev.yaml @@ -10,6 +10,6 @@ spec: endpoint: http://graphql.checkout.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/checkout:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/checkout:main path: /app/schema.graphql diff --git a/subgraphs/checkout/k8s/subgraph-prod.yaml b/subgraphs/checkout/k8s/subgraph-prod.yaml index 65a70cb..70a6fa8 100644 --- a/subgraphs/checkout/k8s/subgraph-prod.yaml +++ b/subgraphs/checkout/k8s/subgraph-prod.yaml @@ -10,6 +10,6 @@ spec: endpoint: http://graphql.checkout.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/andywgarcia/reference-architecture/checkout:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/checkout:main path: /app/schema.graphql diff --git a/subgraphs/discovery/deploy/values.yaml b/subgraphs/discovery/deploy/values.yaml index cd0958c..672e1c2 100644 
--- a/subgraphs/discovery/deploy/values.yaml +++ b/subgraphs/discovery/deploy/values.yaml @@ -1,7 +1,7 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout + repository: ghcr.io/andywgarcia/implemented-reference-architecture/discovery pullPolicy: Always tag: main @@ -11,6 +11,11 @@ fullnameOverride: graphql serviceAccount: create: false +# Image pull secrets for GitHub Container Registry +# Set this if your images are private and require authentication +imagePullSecrets: + - name: ghcr-secret + podAnnotations: {} securityContext: {} diff --git a/subgraphs/discovery/k8s/subgraph-dev.yaml b/subgraphs/discovery/k8s/subgraph-dev.yaml index 4890039..f69c8ac 100644 --- a/subgraphs/discovery/k8s/subgraph-dev.yaml +++ b/subgraphs/discovery/k8s/subgraph-dev.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.discovery.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/discovery:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/discovery:main path: /app/schema.graphql diff --git a/subgraphs/discovery/k8s/subgraph-prod.yaml b/subgraphs/discovery/k8s/subgraph-prod.yaml index 4890039..f69c8ac 100644 --- a/subgraphs/discovery/k8s/subgraph-prod.yaml +++ b/subgraphs/discovery/k8s/subgraph-prod.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.discovery.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/discovery:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/discovery:main path: /app/schema.graphql diff --git a/subgraphs/inventory/deploy/values.yaml b/subgraphs/inventory/deploy/values.yaml index cd0958c..1fb8fd4 100644 --- a/subgraphs/inventory/deploy/values.yaml +++ b/subgraphs/inventory/deploy/values.yaml @@ -1,7 +1,7 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout + repository: ghcr.io/andywgarcia/implemented-reference-architecture/inventory pullPolicy: Always tag: main @@ -11,6 +11,11 @@ fullnameOverride: graphql serviceAccount: create: false +# Image pull secrets for GitHub Container Registry +# Set this if your images are private and require authentication +imagePullSecrets: + - name: ghcr-secret + podAnnotations: {} securityContext: {} diff --git a/subgraphs/inventory/k8s/subgraph-dev.yaml b/subgraphs/inventory/k8s/subgraph-dev.yaml index a7fcb24..1dfac56 100644 --- a/subgraphs/inventory/k8s/subgraph-dev.yaml +++ b/subgraphs/inventory/k8s/subgraph-dev.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.inventory.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/inventory:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/inventory:main path: /app/schema.graphql diff --git a/subgraphs/inventory/k8s/subgraph-prod.yaml b/subgraphs/inventory/k8s/subgraph-prod.yaml index a7fcb24..1dfac56 100644 --- a/subgraphs/inventory/k8s/subgraph-prod.yaml +++ b/subgraphs/inventory/k8s/subgraph-prod.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.inventory.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/inventory:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/inventory:main path: /app/schema.graphql diff --git a/subgraphs/orders/deploy/values.yaml b/subgraphs/orders/deploy/values.yaml index cd0958c..9db7dba 100644 --- a/subgraphs/orders/deploy/values.yaml +++ b/subgraphs/orders/deploy/values.yaml @@ -1,7 +1,7 @@ replicaCount: 
3 image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout + repository: ghcr.io/andywgarcia/implemented-reference-architecture/orders pullPolicy: Always tag: main @@ -11,6 +11,11 @@ fullnameOverride: graphql serviceAccount: create: false +# Image pull secrets for GitHub Container Registry +# Set this if your images are private and require authentication +imagePullSecrets: + - name: ghcr-secret + podAnnotations: {} securityContext: {} diff --git a/subgraphs/orders/k8s/subgraph-dev.yaml b/subgraphs/orders/k8s/subgraph-dev.yaml index d4acc82..1793d41 100644 --- a/subgraphs/orders/k8s/subgraph-dev.yaml +++ b/subgraphs/orders/k8s/subgraph-dev.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.orders.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/orders:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/orders:main path: /app/schema.graphql diff --git a/subgraphs/orders/k8s/subgraph-prod.yaml b/subgraphs/orders/k8s/subgraph-prod.yaml index d4acc82..1793d41 100644 --- a/subgraphs/orders/k8s/subgraph-prod.yaml +++ b/subgraphs/orders/k8s/subgraph-prod.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.orders.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/orders:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/orders:main path: /app/schema.graphql diff --git a/subgraphs/products/deploy/values.yaml b/subgraphs/products/deploy/values.yaml index cd0958c..ad7902e 100644 --- a/subgraphs/products/deploy/values.yaml +++ b/subgraphs/products/deploy/values.yaml @@ -1,7 +1,7 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout + repository: ghcr.io/andywgarcia/implemented-reference-architecture/products pullPolicy: Always tag: main @@ -11,6 +11,11 @@ fullnameOverride: graphql serviceAccount: create: false +# Image pull secrets for GitHub Container Registry +# Set this if your images are private and require authentication +imagePullSecrets: + - name: ghcr-secret + podAnnotations: {} securityContext: {} diff --git a/subgraphs/products/k8s/subgraph-dev.yaml b/subgraphs/products/k8s/subgraph-dev.yaml index e0fbdc5..980c79d 100644 --- a/subgraphs/products/k8s/subgraph-dev.yaml +++ b/subgraphs/products/k8s/subgraph-dev.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.products.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/products:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/products:main path: /app/schema.graphql diff --git a/subgraphs/products/k8s/subgraph-prod.yaml b/subgraphs/products/k8s/subgraph-prod.yaml index e0fbdc5..980c79d 100644 --- a/subgraphs/products/k8s/subgraph-prod.yaml +++ b/subgraphs/products/k8s/subgraph-prod.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.products.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/products:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/products:main path: /app/schema.graphql diff --git a/subgraphs/reviews/deploy/values.yaml b/subgraphs/reviews/deploy/values.yaml index cd0958c..0827dc8 100644 --- a/subgraphs/reviews/deploy/values.yaml +++ b/subgraphs/reviews/deploy/values.yaml @@ -1,7 +1,7 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout + repository: ghcr.io/andywgarcia/implemented-reference-architecture/reviews pullPolicy: Always tag: main 
@@ -11,6 +11,11 @@ fullnameOverride: graphql serviceAccount: create: false +# Image pull secrets for GitHub Container Registry +# Set this if your images are private and require authentication +imagePullSecrets: + - name: ghcr-secret + podAnnotations: {} securityContext: {} diff --git a/subgraphs/reviews/k8s/subgraph-dev.yaml b/subgraphs/reviews/k8s/subgraph-dev.yaml index 55b87d9..5fb5903 100644 --- a/subgraphs/reviews/k8s/subgraph-dev.yaml +++ b/subgraphs/reviews/k8s/subgraph-dev.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.reviews.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/reviews:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/reviews:main path: /app/schema.graphql diff --git a/subgraphs/reviews/k8s/subgraph-prod.yaml b/subgraphs/reviews/k8s/subgraph-prod.yaml index 55b87d9..5fb5903 100644 --- a/subgraphs/reviews/k8s/subgraph-prod.yaml +++ b/subgraphs/reviews/k8s/subgraph-prod.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.reviews.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/reviews:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/reviews:main path: /app/schema.graphql diff --git a/subgraphs/shipping/deploy/values.yaml b/subgraphs/shipping/deploy/values.yaml index cd0958c..ff4c6a8 100644 --- a/subgraphs/shipping/deploy/values.yaml +++ b/subgraphs/shipping/deploy/values.yaml @@ -1,7 +1,7 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout + repository: ghcr.io/andywgarcia/implemented-reference-architecture/shipping pullPolicy: Always tag: main @@ -11,6 +11,11 @@ fullnameOverride: graphql serviceAccount: create: false +# Image pull secrets for GitHub Container Registry +# Set this if your images are private and require authentication +imagePullSecrets: + - name: ghcr-secret + podAnnotations: {} securityContext: {} diff --git a/subgraphs/shipping/k8s/subgraph-dev.yaml b/subgraphs/shipping/k8s/subgraph-dev.yaml index bc31d82..5178e7b 100644 --- a/subgraphs/shipping/k8s/subgraph-dev.yaml +++ b/subgraphs/shipping/k8s/subgraph-dev.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.shipping.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/shipping:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/shipping:main path: /app/schema.graphql diff --git a/subgraphs/shipping/k8s/subgraph-prod.yaml b/subgraphs/shipping/k8s/subgraph-prod.yaml index bc31d82..5178e7b 100644 --- a/subgraphs/shipping/k8s/subgraph-prod.yaml +++ b/subgraphs/shipping/k8s/subgraph-prod.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.shipping.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/shipping:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/shipping:main path: /app/schema.graphql diff --git a/subgraphs/users/Dockerfile b/subgraphs/users/Dockerfile index dc42026..7b15dec 100644 --- a/subgraphs/users/Dockerfile +++ b/subgraphs/users/Dockerfile @@ -11,6 +11,7 @@ COPY src ./src COPY tsconfig.json . COPY codegen.ts . COPY schema.graphql . 
+COPY keys ./keys RUN npm run build diff --git a/subgraphs/users/deploy/values.yaml b/subgraphs/users/deploy/values.yaml index cd0958c..1cfca0e 100644 --- a/subgraphs/users/deploy/values.yaml +++ b/subgraphs/users/deploy/values.yaml @@ -1,7 +1,7 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/reference-architecture/checkout + repository: ghcr.io/andywgarcia/implemented-reference-architecture/users pullPolicy: Always tag: main @@ -11,6 +11,11 @@ fullnameOverride: graphql serviceAccount: create: false +# Image pull secrets for GitHub Container Registry +# Set this if your images are private and require authentication +imagePullSecrets: + - name: ghcr-secret + podAnnotations: {} securityContext: {} diff --git a/subgraphs/users/k8s/subgraph-dev.yaml b/subgraphs/users/k8s/subgraph-dev.yaml index 2d63703..15a5296 100644 --- a/subgraphs/users/k8s/subgraph-dev.yaml +++ b/subgraphs/users/k8s/subgraph-dev.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.users.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/users:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/users:main path: /app/schema.graphql diff --git a/subgraphs/users/k8s/subgraph-prod.yaml b/subgraphs/users/k8s/subgraph-prod.yaml index 2d63703..15a5296 100644 --- a/subgraphs/users/k8s/subgraph-prod.yaml +++ b/subgraphs/users/k8s/subgraph-prod.yaml @@ -10,5 +10,5 @@ spec: endpoint: http://graphql.users.svc.cluster.local:4001 schema: ociImage: - reference: ghcr.io/${GITHUB_ORG}/reference-architecture/users:main + reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/users:main path: /app/schema.graphql diff --git a/terraform/aws/create_graph.sh b/terraform/aws/create_graph.sh index 9631020..158b0de 100755 --- a/terraform/aws/create_graph.sh +++ b/terraform/aws/create_graph.sh @@ -242,6 +242,5 @@ echo "export TF_VAR_apollo_graph_id=\"$GRAPH_ID\"" >> .env echo "export TF_VAR_pq_dev_id=\"$DEV_PQ_ID\"" >> .env echo "export TF_VAR_pq_prod_id=\"$PROD_PQ_ID\"" >> .env echo "export OPERATOR_KEY=\"$OPERATOR_KEY\"" >> .env -echo "export GITHUB_ORG=\"$(git remote get-url origin 2>/dev/null | sed -E 's|.*github.com/([^/]+)/.*|\1|' || echo 'apollosolutions')\"" >> .env echo '' >> .env echo 'Re-run `source .env` to load them.' diff --git a/terraform/aws/setup_clusters.sh b/terraform/aws/setup_clusters.sh index 59a207f..8f563b9 100755 --- a/terraform/aws/setup_clusters.sh +++ b/terraform/aws/setup_clusters.sh @@ -60,6 +60,42 @@ environment_setup(){ echo "Warning: OPERATOR_KEY not set. Operator secret not created." fi + # Create GitHub Container Registry image pull secret (optional, requires TF_VAR_github_token) + if [[ -n "$TF_VAR_github_token" && -n "$GITHUB_ORG" ]]; then + echo "Creating GitHub Container Registry image pull secret..." 
+ # Create in default namespace + kubectl create secret docker-registry ghcr-secret \ + --docker-server=ghcr.io \ + --docker-username="$GITHUB_ORG" \ + --docker-password="$TF_VAR_github_token" \ + --namespace=default \ + --dry-run=client -o yaml | kubectl apply -f - + + # Create in apollo namespace + kubectl create secret docker-registry ghcr-secret \ + --docker-server=ghcr.io \ + --docker-username="$GITHUB_ORG" \ + --docker-password="$TF_VAR_github_token" \ + --namespace=apollo \ + --dry-run=client -o yaml | kubectl apply -f - + + # Create in apollo-operator namespace and patch service account + kubectl create secret docker-registry ghcr-secret \ + --docker-server=ghcr.io \ + --docker-username="$GITHUB_ORG" \ + --docker-password="$TF_VAR_github_token" \ + --namespace=apollo-operator \ + --dry-run=client -o yaml | kubectl apply -f - + + kubectl patch serviceaccount apollo-operator -n apollo-operator \ + -p '{"imagePullSecrets":[{"name":"ghcr-secret"}]}' || true + + echo "GitHub Container Registry image pull secret created" + else + echo "Warning: TF_VAR_github_token and/or GITHUB_ORG not set. Image pull secret not created." + echo " Subgraphs may fail to pull images if they are private. Set these variables to enable image pull authentication." + fi + # Install operator using Helm if [[ $(which helm) != "" ]]; then helm upgrade --install --atomic apollo-operator \ diff --git a/terraform/gcp/create_graph.sh b/terraform/gcp/create_graph.sh index 947768a..2356390 100755 --- a/terraform/gcp/create_graph.sh +++ b/terraform/gcp/create_graph.sh @@ -262,6 +262,5 @@ echo "export TF_VAR_apollo_graph_id=\"$GRAPH_ID\"" >> .env echo "export TF_VAR_pq_dev_id=\"$DEV_PQ_ID\"" >> .env echo "export TF_VAR_pq_prod_id=\"$PROD_PQ_ID\"" >> .env echo "export OPERATOR_KEY=\"$OPERATOR_KEY\"" >> .env -echo "export GITHUB_ORG=\"$(git remote get-url origin 2>/dev/null | sed -E 's|.*github.com/([^/]+)/.*|\1|' || echo 'apollosolutions')\"" >> .env echo '' >> .env echo 'Re-run `source .env` to load them.' diff --git a/terraform/gcp/github.tf b/terraform/gcp/github.tf index 15ca5cb..5fc1514 100644 --- a/terraform/gcp/github.tf +++ b/terraform/gcp/github.tf @@ -4,15 +4,15 @@ provider "github" { # Infra repo for Router, Otel, load testing resource "github_repository" "repo" { - name = "reference-architecture" + name = "implemented-reference-architecture" description = "Apollo supergraph reference architecture repository" visibility = "public" depends_on = [ module.gke ] template { - owner = "apollosolutions" - repository = "reference-architecture" + owner = "andywgarcia" + repository = "reference-architecture-1" } } diff --git a/terraform/gcp/setup_clusters.sh b/terraform/gcp/setup_clusters.sh index 59a207f..8f563b9 100755 --- a/terraform/gcp/setup_clusters.sh +++ b/terraform/gcp/setup_clusters.sh @@ -60,6 +60,42 @@ environment_setup(){ echo "Warning: OPERATOR_KEY not set. Operator secret not created." fi + # Create GitHub Container Registry image pull secret (optional, requires TF_VAR_github_token) + if [[ -n "$TF_VAR_github_token" && -n "$GITHUB_ORG" ]]; then + echo "Creating GitHub Container Registry image pull secret..." 
+ # Create in default namespace + kubectl create secret docker-registry ghcr-secret \ + --docker-server=ghcr.io \ + --docker-username="$GITHUB_ORG" \ + --docker-password="$TF_VAR_github_token" \ + --namespace=default \ + --dry-run=client -o yaml | kubectl apply -f - + + # Create in apollo namespace + kubectl create secret docker-registry ghcr-secret \ + --docker-server=ghcr.io \ + --docker-username="$GITHUB_ORG" \ + --docker-password="$TF_VAR_github_token" \ + --namespace=apollo \ + --dry-run=client -o yaml | kubectl apply -f - + + # Create in apollo-operator namespace and patch service account + kubectl create secret docker-registry ghcr-secret \ + --docker-server=ghcr.io \ + --docker-username="$GITHUB_ORG" \ + --docker-password="$TF_VAR_github_token" \ + --namespace=apollo-operator \ + --dry-run=client -o yaml | kubectl apply -f - + + kubectl patch serviceaccount apollo-operator -n apollo-operator \ + -p '{"imagePullSecrets":[{"name":"ghcr-secret"}]}' || true + + echo "GitHub Container Registry image pull secret created" + else + echo "Warning: TF_VAR_github_token and/or GITHUB_ORG not set. Image pull secret not created." + echo " Subgraphs may fail to pull images if they are private. Set these variables to enable image pull authentication." + fi + # Install operator using Helm if [[ $(which helm) != "" ]]; then helm upgrade --install --atomic apollo-operator \ diff --git a/terraform/minikube/create_graph.sh b/terraform/minikube/create_graph.sh index 9631020..158b0de 100755 --- a/terraform/minikube/create_graph.sh +++ b/terraform/minikube/create_graph.sh @@ -242,6 +242,5 @@ echo "export TF_VAR_apollo_graph_id=\"$GRAPH_ID\"" >> .env echo "export TF_VAR_pq_dev_id=\"$DEV_PQ_ID\"" >> .env echo "export TF_VAR_pq_prod_id=\"$PROD_PQ_ID\"" >> .env echo "export OPERATOR_KEY=\"$OPERATOR_KEY\"" >> .env -echo "export GITHUB_ORG=\"$(git remote get-url origin 2>/dev/null | sed -E 's|.*github.com/([^/]+)/.*|\1|' || echo 'apollosolutions')\"" >> .env echo '' >> .env echo 'Re-run `source .env` to load them.' diff --git a/terraform/minikube/setup_clusters.sh b/terraform/minikube/setup_clusters.sh index 59a207f..8f563b9 100755 --- a/terraform/minikube/setup_clusters.sh +++ b/terraform/minikube/setup_clusters.sh @@ -60,6 +60,42 @@ environment_setup(){ echo "Warning: OPERATOR_KEY not set. Operator secret not created." fi + # Create GitHub Container Registry image pull secret (optional, requires TF_VAR_github_token) + if [[ -n "$TF_VAR_github_token" && -n "$GITHUB_ORG" ]]; then + echo "Creating GitHub Container Registry image pull secret..." 
+ # Create in default namespace + kubectl create secret docker-registry ghcr-secret \ + --docker-server=ghcr.io \ + --docker-username="$GITHUB_ORG" \ + --docker-password="$TF_VAR_github_token" \ + --namespace=default \ + --dry-run=client -o yaml | kubectl apply -f - + + # Create in apollo namespace + kubectl create secret docker-registry ghcr-secret \ + --docker-server=ghcr.io \ + --docker-username="$GITHUB_ORG" \ + --docker-password="$TF_VAR_github_token" \ + --namespace=apollo \ + --dry-run=client -o yaml | kubectl apply -f - + + # Create in apollo-operator namespace and patch service account + kubectl create secret docker-registry ghcr-secret \ + --docker-server=ghcr.io \ + --docker-username="$GITHUB_ORG" \ + --docker-password="$TF_VAR_github_token" \ + --namespace=apollo-operator \ + --dry-run=client -o yaml | kubectl apply -f - + + kubectl patch serviceaccount apollo-operator -n apollo-operator \ + -p '{"imagePullSecrets":[{"name":"ghcr-secret"}]}' || true + + echo "GitHub Container Registry image pull secret created" + else + echo "Warning: TF_VAR_github_token and/or GITHUB_ORG not set. Image pull secret not created." + echo " Subgraphs may fail to pull images if they are private. Set these variables to enable image pull authentication." + fi + # Install operator using Helm if [[ $(which helm) != "" ]]; then helm upgrade --install --atomic apollo-operator \ From 106deb0ce9c2efd37e752e76d603e7685b5f4be9 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Thu, 6 Nov 2025 12:21:48 -0800 Subject: [PATCH 06/31] Remove deprecated studio.graphRef configuration from supergraph-dev.yaml to streamline deployment settings. --- deploy/operator-resources/supergraph-dev.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/deploy/operator-resources/supergraph-dev.yaml b/deploy/operator-resources/supergraph-dev.yaml index dac21e9..cd32901 100644 --- a/deploy/operator-resources/supergraph-dev.yaml +++ b/deploy/operator-resources/supergraph-dev.yaml @@ -12,8 +12,6 @@ spec: cpu: 100m memory: 256Mi schema: - studio: - graphRef: my-graph@dev resource: name: reference-architecture-dev namespace: apollo From c23d9c5ab327171d04fa7b95f29063abdf7c5eb3 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Thu, 6 Nov 2025 15:50:14 -0800 Subject: [PATCH 07/31] Update router versions in supergraph deployment files and add values.yaml for API key configuration --- deploy/operator-resources/supergraph-dev.yaml | 2 +- deploy/operator-resources/supergraph-prod.yaml | 2 +- deploy/operator-resources/values.yaml | 6 ++++++ 3 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 deploy/operator-resources/values.yaml diff --git a/deploy/operator-resources/supergraph-dev.yaml b/deploy/operator-resources/supergraph-dev.yaml index cd32901..807f6f8 100644 --- a/deploy/operator-resources/supergraph-dev.yaml +++ b/deploy/operator-resources/supergraph-dev.yaml @@ -6,7 +6,7 @@ metadata: spec: replicas: 1 podTemplate: - routerVersion: 1.37.0 + routerVersion: 2.7.0 resources: requests: cpu: 100m diff --git a/deploy/operator-resources/supergraph-prod.yaml b/deploy/operator-resources/supergraph-prod.yaml index 98eb7b8..9b3ec55 100644 --- a/deploy/operator-resources/supergraph-prod.yaml +++ b/deploy/operator-resources/supergraph-prod.yaml @@ -6,7 +6,7 @@ metadata: spec: replicas: 3 podTemplate: - routerVersion: 1.37.0 + routerVersion: 2.3.0 resources: requests: cpu: 500m diff --git a/deploy/operator-resources/values.yaml b/deploy/operator-resources/values.yaml new file mode 100644 index 0000000..6baff3b --- /dev/null +++ 
b/deploy/operator-resources/values.yaml @@ -0,0 +1,6 @@ +apiKey: + secretName: apollo-api-key +config: + controllers: + supergraph: + apiKeySecret: apollo-api-key \ No newline at end of file From 1ca7f40be74b111e65bd375dea347669eb459b30 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Mon, 10 Nov 2025 10:42:28 -0800 Subject: [PATCH 08/31] Converting to only running locally --- .github/workflows/_deploy-client-aws.yaml | 104 --- .github/workflows/_deploy-client-gke.yaml | 116 ---- .../workflows/_deploy-coprocessor-aws.yaml | 108 ---- .../workflows/_deploy-coprocessor-gke.yaml | 120 ---- .../workflows/_deploy-loadtest-infra-aws.yaml | 49 -- .../workflows/_deploy-loadtest-infra-gke.yaml | 71 --- .../workflows/_deploy-otel-collector-aws.yaml | 63 -- .../workflows/_deploy-otel-collector-gke.yaml | 77 --- .github/workflows/_deploy-router-aws.yaml | 116 ---- .github/workflows/_deploy-router-gke.yaml | 129 ---- .github/workflows/_determine-provider.yaml | 36 -- .../workflows/_rover-client-pq-publish.yml | 44 -- .github/workflows/_rover-subgraph-check.yml | 42 -- .github/workflows/_run-loadtest-aws.yaml | 59 -- .github/workflows/_run-loadtest-gke.yaml | 77 --- .github/workflows/_uninstall-router-aws.yaml | 38 -- .github/workflows/_uninstall-router-gke.yaml | 58 -- .github/workflows/deploy-client.yaml | 56 -- .github/workflows/deploy-contract-router.yaml | 61 -- .github/workflows/deploy-coprocessor.yaml | 50 -- .github/workflows/deploy-loadtest-infra.yaml | 28 - .github/workflows/deploy-otel-collector.yaml | 30 - .github/workflows/deploy-router.yaml | 50 -- .github/workflows/manual-deploy.yaml | 54 -- .github/workflows/merge-to-main.yaml | 139 ---- .github/workflows/pr-check-code.yaml | 39 -- .github/workflows/pr-check-deploy.yaml | 18 - .github/workflows/run-load-test.yaml | 44 -- .github/workflows/uninstall-router.yaml | 42 -- README.md | 39 +- deploy/client/values.yaml | 10 +- deploy/coprocessor/values.yaml | 10 +- deploy/operator-resources/apply-resources.sh | 92 ++- deploy/operator-resources/ingress-dev.yaml | 16 +- deploy/operator-resources/ingress-prod.yaml | 18 +- docs/setup.md | 595 ++++++------------ scripts/minikube/01-setup-minikube.sh | 48 ++ scripts/minikube/02-setup-apollo-graph.sh | 176 ++++++ scripts/minikube/03-setup-cluster.sh | 90 +++ scripts/minikube/04-build-images.sh | 100 +++ scripts/minikube/05-deploy-subgraphs.sh | 109 ++++ .../minikube/06-deploy-operator-resources.sh | 106 ++++ scripts/minikube/07-deploy-ingress.sh | 115 ++++ scripts/minikube/08-deploy-client.sh | 134 ++++ subgraphs/checkout/deploy/values.yaml | 11 +- subgraphs/discovery/deploy/values.yaml | 11 +- subgraphs/inventory/deploy/values.yaml | 11 +- subgraphs/orders/deploy/values.yaml | 11 +- subgraphs/products/deploy/values.yaml | 11 +- subgraphs/reviews/deploy/values.yaml | 11 +- subgraphs/shipping/deploy/values.yaml | 11 +- subgraphs/users/deploy/values.yaml | 11 +- terraform/aws/.env.sample | 6 - terraform/aws/.terraform.lock.hcl | 145 ----- terraform/aws/create_graph.sh | 246 -------- terraform/aws/eks_dev.tf | 69 -- terraform/aws/eks_prod.tf | 68 -- terraform/aws/github.tf | 57 -- terraform/aws/iam.tf | 29 - terraform/aws/main.tf | 14 - terraform/aws/outputs.tf | 14 - terraform/aws/setup_clusters.sh | 123 ---- terraform/aws/variables.tf | 62 -- terraform/aws/vpc.tf | 36 -- terraform/gcp/.env.sample | 10 - terraform/gcp/.terraform.lock.hcl | 123 ---- terraform/gcp/create_graph.sh | 266 -------- terraform/gcp/github.tf | 72 --- terraform/gcp/gke.tf | 46 -- terraform/gcp/main.tf | 15 - 
terraform/gcp/metrics.tf | 18 - terraform/gcp/outputs.tf | 11 - terraform/gcp/setup_clusters.sh | 123 ---- terraform/gcp/variables.tf | 64 -- terraform/gcp/vpc.tf | 28 - terraform/minikube/github.tf | 38 -- terraform/minikube/main.tf | 7 - terraform/minikube/outputs.tf | 12 - terraform/minikube/variables.tf | 32 - 79 files changed, 1211 insertions(+), 4187 deletions(-) delete mode 100644 .github/workflows/_deploy-client-aws.yaml delete mode 100644 .github/workflows/_deploy-client-gke.yaml delete mode 100644 .github/workflows/_deploy-coprocessor-aws.yaml delete mode 100644 .github/workflows/_deploy-coprocessor-gke.yaml delete mode 100644 .github/workflows/_deploy-loadtest-infra-aws.yaml delete mode 100644 .github/workflows/_deploy-loadtest-infra-gke.yaml delete mode 100644 .github/workflows/_deploy-otel-collector-aws.yaml delete mode 100644 .github/workflows/_deploy-otel-collector-gke.yaml delete mode 100644 .github/workflows/_deploy-router-aws.yaml delete mode 100644 .github/workflows/_deploy-router-gke.yaml delete mode 100644 .github/workflows/_determine-provider.yaml delete mode 100644 .github/workflows/_rover-client-pq-publish.yml delete mode 100644 .github/workflows/_rover-subgraph-check.yml delete mode 100644 .github/workflows/_run-loadtest-aws.yaml delete mode 100644 .github/workflows/_run-loadtest-gke.yaml delete mode 100644 .github/workflows/_uninstall-router-aws.yaml delete mode 100644 .github/workflows/_uninstall-router-gke.yaml delete mode 100644 .github/workflows/deploy-client.yaml delete mode 100644 .github/workflows/deploy-contract-router.yaml delete mode 100644 .github/workflows/deploy-coprocessor.yaml delete mode 100644 .github/workflows/deploy-loadtest-infra.yaml delete mode 100644 .github/workflows/deploy-otel-collector.yaml delete mode 100644 .github/workflows/deploy-router.yaml delete mode 100644 .github/workflows/manual-deploy.yaml delete mode 100644 .github/workflows/merge-to-main.yaml delete mode 100644 .github/workflows/pr-check-code.yaml delete mode 100644 .github/workflows/pr-check-deploy.yaml delete mode 100644 .github/workflows/run-load-test.yaml delete mode 100644 .github/workflows/uninstall-router.yaml create mode 100755 scripts/minikube/01-setup-minikube.sh create mode 100755 scripts/minikube/02-setup-apollo-graph.sh create mode 100755 scripts/minikube/03-setup-cluster.sh create mode 100755 scripts/minikube/04-build-images.sh create mode 100755 scripts/minikube/05-deploy-subgraphs.sh create mode 100755 scripts/minikube/06-deploy-operator-resources.sh create mode 100755 scripts/minikube/07-deploy-ingress.sh create mode 100755 scripts/minikube/08-deploy-client.sh delete mode 100644 terraform/aws/.env.sample delete mode 100644 terraform/aws/.terraform.lock.hcl delete mode 100755 terraform/aws/create_graph.sh delete mode 100644 terraform/aws/eks_dev.tf delete mode 100644 terraform/aws/eks_prod.tf delete mode 100644 terraform/aws/github.tf delete mode 100644 terraform/aws/iam.tf delete mode 100644 terraform/aws/main.tf delete mode 100644 terraform/aws/outputs.tf delete mode 100755 terraform/aws/setup_clusters.sh delete mode 100644 terraform/aws/variables.tf delete mode 100644 terraform/aws/vpc.tf delete mode 100644 terraform/gcp/.env.sample delete mode 100644 terraform/gcp/.terraform.lock.hcl delete mode 100755 terraform/gcp/create_graph.sh delete mode 100644 terraform/gcp/github.tf delete mode 100644 terraform/gcp/gke.tf delete mode 100644 terraform/gcp/main.tf delete mode 100644 terraform/gcp/metrics.tf delete mode 100644 terraform/gcp/outputs.tf delete mode 
100755 terraform/gcp/setup_clusters.sh delete mode 100644 terraform/gcp/variables.tf delete mode 100644 terraform/gcp/vpc.tf delete mode 100644 terraform/minikube/github.tf delete mode 100644 terraform/minikube/main.tf delete mode 100644 terraform/minikube/outputs.tf delete mode 100644 terraform/minikube/variables.tf diff --git a/.github/workflows/_deploy-client-aws.yaml b/.github/workflows/_deploy-client-aws.yaml deleted file mode 100644 index 952fb74..0000000 --- a/.github/workflows/_deploy-client-aws.yaml +++ /dev/null @@ -1,104 +0,0 @@ -name: _deploy-client-aws - -on: - workflow_call: - inputs: - version: - type: string - default: main - required: true - dry-run: - type: boolean - description: Run a dry run with helm - required: false - default: false - debug: - type: boolean - description: Run helm in debug mode - required: false - default: false - region: - description: AWS target region - type: string - default: us-east-1 - required: false - secrets: - AWS_ACCESS_KEY: - required: true - AWS_SECRET_KEY: - required: true - CLUSTER_PREFIX: - required: true - -env: - IMAGE: ${{ github.repository }}/client - REGISTRY: ghcr.io - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY }} - AWS_REGION: ${{ inputs.region }} - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: "Configure Kubeconfig w/ EKS" - run: aws eks update-kubeconfig --name ${{ secrets.CLUSTER_PREFIX }}-prod --region ${{ env.AWS_REGION }} - - # Deploy the Docker image to the GKE cluster with dry run - - name: Helm dry-run - if: ${{ inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade client \ - --create-namespace \ - --namespace client \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --dry-run \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/client \ - --values ./deploy/client/values.yaml \ - - # Deploy the Docker image to the GKE cluster for real with debug - - name: Deploy - if: ${{ !inputs.dry-run && inputs.debug }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade client \ - --create-namespace \ - --namespace client \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --atomic \ - --install \ - --timeout 60s \ - --debug \ - ./deploy/client \ - --values ./deploy/client/values.yaml \ - - # Deploy the Docker image to the GKE cluster for real - - name: Deploy - if: ${{ !inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade client \ - --create-namespace \ - --namespace client \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/client \ - --values ./deploy/client/values.yaml \ - diff --git a/.github/workflows/_deploy-client-gke.yaml b/.github/workflows/_deploy-client-gke.yaml deleted file mode 100644 index 2817461..0000000 --- a/.github/workflows/_deploy-client-gke.yaml +++ /dev/null @@ -1,116 +0,0 @@ -name: _deploy-client-gke - -# Started from GH Docs -# https://docs.github.com/en/actions/deployment/deploying-to-your-cloud-provider/deploying-to-google-kubernetes-engine -on: - workflow_call: - inputs: - version: - type: string - default: main - required: true - dry-run: - type: boolean - description: Run a dry run with helm - required: false - default: false - debug: - type: boolean 
- description: Run helm in debug mode - required: false - default: false - secrets: - GCP_CREDENTIALS: - required: true - CLUSTER_PREFIX: - required: true - -env: - IMAGE: ${{ github.repository }}/client - GKE_ZONE: us-east1 - REGISTRY: ghcr.io - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - id: "auth" - uses: "google-github-actions/auth@v0" - with: - credentials_json: ${{ secrets.GCP_CREDENTIALS }} - - - name: "Set up Cloud SDK" - uses: google-github-actions/setup-gcloud@v0 - - - name: "Use gcloud CLI" - run: gcloud info - - # Configure Docker to use the gcloud command-line tool as a credential - # helper for authentication - - run: |- - gcloud --quiet auth configure-docker - - # Get the GKE credentials so we can deploy to the cluster - - uses: google-github-actions/get-gke-credentials@fb08709ba27618c31c09e014e1d8364b02e5042e - with: - cluster_name: ${{ secrets.CLUSTER_PREFIX }}-prod - location: ${{ env.GKE_ZONE }} - - # Deploy the Docker image to the GKE cluster with dry run - # Deploy the Docker image to the GKE cluster with dry run - - name: Helm dry-run - if: ${{ inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade client \ - --create-namespace \ - --namespace client \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --dry-run \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/client \ - --values ./deploy/client/values.yaml \ - - # Deploy the Docker image to the GKE cluster for real with debug - - name: Deploy - if: ${{ !inputs.dry-run && inputs.debug }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade client \ - --create-namespace \ - --namespace client \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --atomic \ - --install \ - --timeout 60s \ - --debug \ - ./deploy/client \ - --values ./deploy/client/values.yaml \ - - # Deploy the Docker image to the GKE cluster for real - - name: Deploy - if: ${{ !inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade client \ - --create-namespace \ - --namespace client \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/client \ - --values ./deploy/client/values.yaml \ diff --git a/.github/workflows/_deploy-coprocessor-aws.yaml b/.github/workflows/_deploy-coprocessor-aws.yaml deleted file mode 100644 index b74131e..0000000 --- a/.github/workflows/_deploy-coprocessor-aws.yaml +++ /dev/null @@ -1,108 +0,0 @@ -name: _deploy-coprocessor-aws - -on: - workflow_call: - inputs: - version: - type: string - default: main - required: true - cluster_suffix: - description: Target AWS cluster suffix - type: string - required: true - dry-run: - type: boolean - description: Run a dry run with helm - required: false - default: false - debug: - type: boolean - description: Run helm in debug mode - required: false - default: false - region: - description: AWS target region - type: string - default: us-east-1 - required: false - secrets: - AWS_ACCESS_KEY: - required: true - AWS_SECRET_KEY: - required: true - CLUSTER_PREFIX: - required: true - -env: - IMAGE: ${{ github.repository }}/coprocessor - REGISTRY: ghcr.io - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY }} - AWS_REGION: ${{ inputs.region }} - -jobs: - deploy: - 
name: Deploy - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: "Configure Kubeconfig w/ EKS" - run: aws eks update-kubeconfig --name ${{ secrets.CLUSTER_PREFIX }}-${{ inputs.cluster_suffix }} --region ${{ env.AWS_REGION }} - - # Deploy the Docker image to the GKE cluster with dry run - - name: Helm dry-run - if: ${{ inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade coprocessor \ - --create-namespace \ - --namespace coprocessor \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --dry-run \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/coprocessor \ - --values ./deploy/coprocessor/values.yaml \ - - # Deploy the Docker image to the GKE cluster for real with debug - - name: Deploy - if: ${{ !inputs.dry-run && inputs.debug }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade coprocessor \ - --create-namespace \ - --namespace coprocessor \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --atomic \ - --install \ - --timeout 60s \ - --debug \ - ./deploy/coprocessor \ - --values ./deploy/coprocessor/values.yaml \ - - # Deploy the Docker image to the GKE cluster for real - - name: Deploy - if: ${{ !inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade coprocessor \ - --create-namespace \ - --namespace coprocessor \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/coprocessor \ - --values ./deploy/coprocessor/values.yaml \ - diff --git a/.github/workflows/_deploy-coprocessor-gke.yaml b/.github/workflows/_deploy-coprocessor-gke.yaml deleted file mode 100644 index ab5b8b3..0000000 --- a/.github/workflows/_deploy-coprocessor-gke.yaml +++ /dev/null @@ -1,120 +0,0 @@ -name: _deploy-coprocessor-gke - -# Started from GH Docs -# https://docs.github.com/en/actions/deployment/deploying-to-your-cloud-provider/deploying-to-google-kubernetes-engine -on: - workflow_call: - inputs: - version: - type: string - default: main - required: true - cluster_suffix: - description: Target GKE cluster suffix - type: string - required: true - dry-run: - type: boolean - description: Run a dry run with helm - required: false - default: false - debug: - type: boolean - description: Run helm in debug mode - required: false - default: false - secrets: - GCP_CREDENTIALS: - required: true - CLUSTER_PREFIX: - required: true - -env: - IMAGE: ${{ github.repository }}/coprocessor - GKE_ZONE: us-east1 - REGISTRY: ghcr.io - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - id: "auth" - uses: "google-github-actions/auth@v0" - with: - credentials_json: ${{ secrets.GCP_CREDENTIALS }} - - - name: "Set up Cloud SDK" - uses: google-github-actions/setup-gcloud@v0 - - - name: "Use gcloud CLI" - run: gcloud info - - # Configure Docker to use the gcloud command-line tool as a credential - # helper for authentication - - run: |- - gcloud --quiet auth configure-docker - - # Get the GKE credentials so we can deploy to the cluster - - uses: google-github-actions/get-gke-credentials@fb08709ba27618c31c09e014e1d8364b02e5042e - with: - cluster_name: ${{ secrets.CLUSTER_PREFIX }}-${{ inputs.cluster_suffix }} - location: ${{ env.GKE_ZONE }} - - # Deploy the Docker image to the GKE cluster with dry run - # Deploy the 
Docker image to the GKE cluster with dry run - - name: Helm dry-run - if: ${{ inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade coprocessor \ - --create-namespace \ - --namespace coprocessor \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --dry-run \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/coprocessor \ - --values ./deploy/coprocessor/values.yaml \ - - # Deploy the Docker image to the GKE cluster for real with debug - - name: Deploy - if: ${{ !inputs.dry-run && inputs.debug }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade coprocessor \ - --create-namespace \ - --namespace coprocessor \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --atomic \ - --install \ - --timeout 60s \ - --debug \ - ./deploy/coprocessor \ - --values ./deploy/coprocessor/values.yaml \ - - # Deploy the Docker image to the GKE cluster for real - - name: Deploy - if: ${{ !inputs.dry-run }} - run: | - repository=$(echo "$REGISTRY/$IMAGE" | tr '[:upper:]' '[:lower:]') - - helm upgrade coprocessor \ - --create-namespace \ - --namespace coprocessor \ - --set image.repository=$repository \ - --set image.tag=${{ inputs.version }} \ - --atomic \ - --install \ - --timeout 60s \ - ./deploy/coprocessor \ - --values ./deploy/coprocessor/values.yaml \ diff --git a/.github/workflows/_deploy-loadtest-infra-aws.yaml b/.github/workflows/_deploy-loadtest-infra-aws.yaml deleted file mode 100644 index 476d783..0000000 --- a/.github/workflows/_deploy-loadtest-infra-aws.yaml +++ /dev/null @@ -1,49 +0,0 @@ -name: _deploy-loadtest-infra-aws - -on: - workflow_call: - secrets: - AWS_ACCESS_KEY: - required: true - AWS_SECRET_KEY: - required: true - CLUSTER_PREFIX: - required: true - -env: - AWS_REGION: us-east-1 # Add your cluster zone here. 
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY }} - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - environment: production - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: "Configure Kubeconfig w/ EKS" - run: aws eks update-kubeconfig --name ${{ secrets.CLUSTER_PREFIX }}-prod --region ${{ env.AWS_REGION }} - - - name: Deploy InfluxDB and Grafana - run: | - helm repo add influxdata https://helm.influxdata.com/ - helm upgrade --install influxdb influxdata/influxdb --namespace monitoring --create-namespace --values ./deploy/influxdb/values.yaml - - helm repo add grafana https://grafana.github.io/helm-charts - helm upgrade --install grafana grafana/grafana --namespace monitoring --values ./deploy/grafana/values.yaml - - - name: Deploy K6 Operator - run: | - helm upgrade k6 \ - --atomic \ - --install \ - ./deploy/k6 \ - --values ./deploy/k6/values.yaml - - - name: Deploy load test scripts - run: | - helm upgrade --install tests ./deploy/tests diff --git a/.github/workflows/_deploy-loadtest-infra-gke.yaml b/.github/workflows/_deploy-loadtest-infra-gke.yaml deleted file mode 100644 index 1127087..0000000 --- a/.github/workflows/_deploy-loadtest-infra-gke.yaml +++ /dev/null @@ -1,71 +0,0 @@ -name: _deploy-loadtest-infra-gcp - -# Started from GH Docs -# https://docs.github.com/en/actions/deployment/deploying-to-your-cloud-provider/deploying-to-google-kubernetes-engine - -# REQUIRED REPO SECRETS -# - GCP_CREDENTIALS -# - CLUSTER_PREFIX - -on: - workflow_call: - secrets: - GCP_CREDENTIALS: - required: true - CLUSTER_PREFIX: - required: true -env: - GKE_ZONE: us-east1 # Add your cluster zone here. - CLUSTER_PREFIX: ${{ secrets.CLUSTER_PREFIX }} - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - environment: production - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - id: "auth" - uses: "google-github-actions/auth@v0" - with: - credentials_json: ${{ secrets.GCP_CREDENTIALS }} - - - name: "Set up Cloud SDK" - uses: google-github-actions/setup-gcloud@v0 - - - name: "Use gcloud CLI" - run: gcloud info - - # Configure Docker to use the gcloud command-line tool as a credential - # helper for authentication - - run: |- - gcloud --quiet auth configure-docker - - # Get the GKE credentials so we can deploy to the cluster - - uses: google-github-actions/get-gke-credentials@fb08709ba27618c31c09e014e1d8364b02e5042e - with: - cluster_name: ${{ env.CLUSTER_PREFIX }}-prod - location: ${{ env.GKE_ZONE }} - - - name: Deploy InfluxDB and Grafana - run: | - helm repo add influxdata https://helm.influxdata.com/ - helm upgrade --install influxdb influxdata/influxdb --namespace monitoring --create-namespace --values ./deploy/influxdb/values.yaml - - helm repo add grafana https://grafana.github.io/helm-charts - helm upgrade --install grafana grafana/grafana --namespace monitoring --values ./deploy/grafana/values.yaml - - - name: Deploy K6 Operator - run: | - helm upgrade k6 \ - --atomic \ - --install \ - ./deploy/k6 \ - --values ./deploy/k6/values.yaml - - - name: Deploy load test scripts - run: | - helm upgrade --install tests ./deploy/tests diff --git a/.github/workflows/_deploy-otel-collector-aws.yaml b/.github/workflows/_deploy-otel-collector-aws.yaml deleted file mode 100644 index 79df917..0000000 --- a/.github/workflows/_deploy-otel-collector-aws.yaml +++ /dev/null @@ -1,63 +0,0 @@ -name: _deploy-otel-collector-aws - -# Started from GH Docs -# 
https://docs.github.com/en/actions/deployment/deploying-to-your-cloud-provider/deploying-to-google-kubernetes-engine - -# REQUIRED REPO SECRETS -# - GCP_CREDENTIALS -# - CLUSTER_PREFIX - -on: - workflow_call: - secrets: - AWS_ACCESS_KEY: - required: true - AWS_SECRET_KEY: - required: true - CLUSTER_PREFIX: - required: true - -env: - AWS_REGION: us-east-1 # Add your cluster zone here. - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY }} - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - environment: production - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: "Configure Kubeconfig w/ EKS" - run: aws eks update-kubeconfig --name ${{ secrets.CLUSTER_PREFIX }}-prod --region ${{ env.AWS_REGION }} - - - name: Deploy Otel - if: ${{ !inputs.dry-run }} - run: | - helm upgrade collector \ - --atomic \ - --install \ - ./deploy/collector \ - --namespace monitoring \ - --create-namespace \ - --set serviceAccount.create=true \ - --values ./deploy/collector/values.yaml - - - name: Helm dependency update - run: | - helm dependency update ./deploy/zipkin - - - name: Deploy Zipkin - if: ${{ !inputs.dry-run }} - run: | - helm upgrade zipkin \ - --atomic \ - --install \ - ./deploy/zipkin \ - --namespace zipkin \ - --create-namespace \ - --values ./deploy/zipkin/values.yaml \ No newline at end of file diff --git a/.github/workflows/_deploy-otel-collector-gke.yaml b/.github/workflows/_deploy-otel-collector-gke.yaml deleted file mode 100644 index d1d7ed3..0000000 --- a/.github/workflows/_deploy-otel-collector-gke.yaml +++ /dev/null @@ -1,77 +0,0 @@ -name: _deploy-otel-collector-gcp - -# Started from GH Docs -# https://docs.github.com/en/actions/deployment/deploying-to-your-cloud-provider/deploying-to-google-kubernetes-engine - -# REQUIRED REPO SECRETS -# - GCP_CREDENTIALS -# - CLUSTER_PREFIX - -on: - workflow_call: - secrets: - GCP_CREDENTIALS: - required: true - CLUSTER_PREFIX: - required: true -env: - GKE_ZONE: us-east1 # Add your cluster zone here. 
- CLUSTER_PREFIX: ${{ secrets.CLUSTER_PREFIX }} - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - environment: production - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - id: "auth" - uses: "google-github-actions/auth@v0" - with: - credentials_json: ${{ secrets.GCP_CREDENTIALS }} - - - name: "Set up Cloud SDK" - uses: google-github-actions/setup-gcloud@v0 - - - name: "Use gcloud CLI" - run: gcloud info - - # Configure Docker to use the gcloud command-line tool as a credential - # helper for authentication - - run: |- - gcloud --quiet auth configure-docker - - # Get the GKE credentials so we can deploy to the cluster - - uses: google-github-actions/get-gke-credentials@fb08709ba27618c31c09e014e1d8364b02e5042e - with: - cluster_name: ${{ env.CLUSTER_PREFIX }}-prod - location: ${{ env.GKE_ZONE }} - - - name: Helm dependency update - run: | - helm dependency update ./deploy/zipkin - - - name: Deploy Otel - if: ${{ !inputs.dry-run }} - run: | - helm upgrade collector \ - --atomic \ - --install \ - ./deploy/collector \ - --namespace monitoring \ - --create-namespace \ - --values ./deploy/collector/values.yaml - - - name: Deploy Zipkin - if: ${{ !inputs.dry-run }} - run: | - helm upgrade zipkin \ - --atomic \ - --install \ - ./deploy/zipkin \ - --namespace zipkin \ - --create-namespace \ - --values ./deploy/zipkin/values.yaml \ No newline at end of file diff --git a/.github/workflows/_deploy-router-aws.yaml b/.github/workflows/_deploy-router-aws.yaml deleted file mode 100644 index 034eef4..0000000 --- a/.github/workflows/_deploy-router-aws.yaml +++ /dev/null @@ -1,116 +0,0 @@ -name: _deploy-router-aws - -on: - workflow_call: - inputs: - environment: - description: "Target variant" - type: string - required: true - default: dev - variant: - description: "Target variant" - type: string - required: true - default: dev - dry-run: - type: boolean - description: "Run a dry run with helm" - required: false - default: false - debug: - type: boolean - description: "Run helm in debug mode" - required: false - default: false - secrets: - AWS_ACCESS_KEY: - required: true - AWS_SECRET_KEY: - required: true - CLUSTER_PREFIX: - required: true - APOLLO_GRAPH_ID: - required: true - APOLLO_KEY: - required: true -env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY }} - AWS_REGION: us-east-1 - CLUSTER_PREFIX: ${{ secrets.CLUSTER_PREFIX }} - APOLLO_GRAPH_ID: ${{ secrets.APOLLO_GRAPH_ID }} - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: "Configure Kubeconfig w/ EKS" - run: aws eks update-kubeconfig --name ${{ secrets.CLUSTER_PREFIX }}-${{ inputs.environment }} --region ${{ env.AWS_REGION }} - - - name: Helm dependency update - run: | - helm dependency update ./deploy/router - - # Deploy the Docker image to the GKE cluster with dry run - - name: Helm dry-run - if: ${{ inputs.dry-run }} - run: | - helm upgrade router \ - --create-namespace \ - --namespace router \ - --set router.managedFederation.graphRef=$APOLLO_GRAPH_ID@${{ inputs.variant }} \ - --set router.managedFederation.apiKey=${{ secrets.APOLLO_KEY }} \ - --set ingress.annotations."kubernetes\.io/ingress\.class"=alb \ - --set ingress.annotations."alb\.ingress\.kubernetes\.io/target-type"=ip \ - --set ingress.annotations."alb\.ingress\.kubernetes\.io/scheme"=internet-facing \ - --set ingress.gcp=false \ - --dry-run \ - --atomic \ - --install \ - ./deploy/router \ - --values ./deploy/router/values.yaml \ - 
-f ./deploy/router/environments/${{ inputs.environment }}.yaml - - # Deploy the Docker image to the GKE cluster for real with debug - - name: Deploy - if: ${{ !inputs.dry-run && inputs.debug }} - run: | - helm upgrade router \ - --create-namespace \ - --namespace router \ - --set router.managedFederation.graphRef=$APOLLO_GRAPH_ID@${{ inputs.variant }} \ - --set router.managedFederation.apiKey=${{ secrets.APOLLO_KEY }} \ - --set ingress.annotations."kubernetes\.io/ingress\.class"=alb \ - --set ingress.annotations."alb\.ingress\.kubernetes\.io/target-type"=ip \ - --set ingress.annotations."alb\.ingress\.kubernetes\.io/scheme"=internet-facing \ - --set ingress.gcp=false \ - --atomic \ - --install \ - --debug \ - ./deploy/router \ - --values ./deploy/router/values.yaml \ - -f ./deploy/router/environments/${{ inputs.environment }}.yaml - - # Deploy the Docker image to the GKE cluster for real - - name: Deploy - if: ${{ !inputs.dry-run }} - run: | - helm upgrade router \ - --create-namespace \ - --namespace router \ - --set router.managedFederation.graphRef=$APOLLO_GRAPH_ID@${{ inputs.variant }} \ - --set router.managedFederation.apiKey=${{ secrets.APOLLO_KEY }} \ - --set ingress.annotations."kubernetes\.io/ingress\.class"=alb \ - --set ingress.annotations."alb\.ingress\.kubernetes\.io/target-type"=ip \ - --set ingress.annotations."alb\.ingress\.kubernetes\.io/scheme"=internet-facing \ - --set ingress.gcp=false \ - --atomic \ - --install \ - ./deploy/router \ - --values ./deploy/router/values.yaml \ - -f ./deploy/router/environments/${{ inputs.environment }}.yaml diff --git a/.github/workflows/_deploy-router-gke.yaml b/.github/workflows/_deploy-router-gke.yaml deleted file mode 100644 index 233f477..0000000 --- a/.github/workflows/_deploy-router-gke.yaml +++ /dev/null @@ -1,129 +0,0 @@ -name: _deploy-router-gcp - -# Started from GH Docs -# https://docs.github.com/en/actions/deployment/deploying-to-your-cloud-provider/deploying-to-google-kubernetes-engine - -# REQUIRED REPO SECRETS -# - GCP_CREDENTIALS -# - CLUSTER_PREFIX -# - APOLLO_GRAPH_ID -# - APOLLO_KEY - -on: - workflow_call: - inputs: - environment: - description: "Target variant" - type: string - required: true - default: dev - variant: - description: "Target variant" - type: string - required: true - default: dev - dry-run: - type: boolean - description: "Run a dry run with helm" - required: false - default: false - debug: - type: boolean - description: "Run helm in debug mode" - required: false - default: false - secrets: - GCP_CREDENTIALS: - required: true - CLUSTER_PREFIX: - required: true - APOLLO_GRAPH_ID: - required: true - APOLLO_KEY: - required: true - -env: - GKE_ZONE: us-east1 # Add your cluster zone here. 
- CLUSTER_PREFIX: ${{ secrets.CLUSTER_PREFIX }} - APOLLO_GRAPH_ID: ${{ secrets.APOLLO_GRAPH_ID }} - -jobs: - deploy: - name: Deploy - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - id: "auth" - uses: "google-github-actions/auth@v0" - with: - credentials_json: ${{ secrets.GCP_CREDENTIALS }} - - - name: "Set up Cloud SDK" - uses: google-github-actions/setup-gcloud@v0 - - - name: "Use gcloud CLI" - run: gcloud info - - # Configure Docker to use the gcloud command-line tool as a credential - # helper for authentication - - run: |- - gcloud --quiet auth configure-docker - - # Get the GKE credentials so we can deploy to the cluster - - uses: google-github-actions/get-gke-credentials@fb08709ba27618c31c09e014e1d8364b02e5042e - with: - cluster_name: ${{ env.CLUSTER_PREFIX }}-${{ inputs.environment }} - location: ${{ env.GKE_ZONE }} - - - name: Helm dependency update - run: | - helm dependency update ./deploy/router - - # Deploy the Docker image to the GKE cluster with dry run - - name: Helm dry-run - if: ${{ inputs.dry-run }} - run: | - helm upgrade router \ - --create-namespace \ - --namespace router \ - --set router.managedFederation.graphRef=$APOLLO_GRAPH_ID@${{ inputs.variant }} \ - --set router.managedFederation.apiKey=${{ secrets.APOLLO_KEY }} \ - --dry-run \ - --atomic \ - --install \ - ./deploy/router \ - --values ./deploy/router/values.yaml \ - -f ./deploy/router/environments/${{ inputs.environment }}.yaml - - # Deploy the Docker image to the GKE cluster for real with debug - - name: Deploy - if: ${{ !inputs.dry-run && inputs.debug }} - run: | - helm upgrade router \ - --create-namespace \ - --namespace router \ - --set router.managedFederation.graphRef=$APOLLO_GRAPH_ID@${{ inputs.variant }} \ - --set router.managedFederation.apiKey=${{ secrets.APOLLO_KEY }} \ - --atomic \ - --install \ - --debug \ - ./deploy/router \ - --values ./deploy/router/values.yaml \ - -f ./deploy/router/environments/${{ inputs.environment }}.yaml - - # Deploy the Docker image to the GKE cluster for real - - name: Deploy - if: ${{ !inputs.dry-run }} - run: | - helm upgrade router \ - --create-namespace \ - --namespace router \ - --set router.managedFederation.graphRef=$APOLLO_GRAPH_ID@${{ inputs.variant }} \ - --set router.managedFederation.apiKey=${{ secrets.APOLLO_KEY }} \ - --atomic \ - --install \ - ./deploy/router \ - --values ./deploy/router/values.yaml \ - -f ./deploy/router/environments/${{ inputs.environment }}.yaml diff --git a/.github/workflows/_determine-provider.yaml b/.github/workflows/_determine-provider.yaml deleted file mode 100644 index 1e02227..0000000 --- a/.github/workflows/_determine-provider.yaml +++ /dev/null @@ -1,36 +0,0 @@ -name: _determine-provider - -on: - workflow_call: - secrets: - AWS_ACCESS_KEY: - required: false - GCP_CREDENTIALS: - required: false - APOLLO_KEY: - required: false - outputs: - gcp: - value: ${{ jobs.provider.outputs.gcp}} - aws: - value: ${{ jobs.provider.outputs.aws}} - apollo: - value: ${{ jobs.provider.outputs.apollo}} -jobs: - provider: - env: - GCP: ${{ secrets.GCP_CREDENTIALS }} - AWS: ${{ secrets.AWS_ACCESS_KEY }} - APOLLO_KEY: ${{ secrets.APOLLO_KEY }} - runs-on: ubuntu-latest - outputs: - gcp: ${{ steps.gcp.outputs.gcp}} - aws: ${{ steps.aws.outputs.aws}} - apollo: ${{ steps.graphos.outputs.apollo }} - steps: - - id: gcp - run: if [ -n "$GCP" ]; then echo "gcp=true" >> $GITHUB_OUTPUT ; else echo "gcp=false" >> $GITHUB_OUTPUT ; fi - - id: aws - run: if [ -n "$AWS" ]; then echo "aws=true" >> $GITHUB_OUTPUT ; else echo 
"aws=false" >> $GITHUB_OUTPUT ; fi - - id: graphos - run: if [ -n "$APOLLO_KEY" ]; then echo "apollo=true" >> $GITHUB_OUTPUT ; else echo "apollo=false" >> $GITHUB_OUTPUT ; fi \ No newline at end of file diff --git a/.github/workflows/_rover-client-pq-publish.yml b/.github/workflows/_rover-client-pq-publish.yml deleted file mode 100644 index a5222ca..0000000 --- a/.github/workflows/_rover-client-pq-publish.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: _rover-client-pq-publish - -on: - workflow_call: - inputs: - environment: - description: "Target variant" - type: string - required: true - default: dev - secrets: - APOLLO_KEY: - required: true - APOLLO_GRAPH_ID: - required: true - -env: - APOLLO_KEY: ${{ secrets.APOLLO_KEY }} - APOLLO_VCS_COMMIT: ${{ github.event.pull_request.head.sha }} - -jobs: - check: - name: Rover PQ Publish - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Install Rover - uses: apollographql-gh-actions/install-rover@v1 - - - name: Generate PQ manifest - run: | - cd ./client/ - npx @apollo/generate-persisted-query-manifest - - - name: Publish PQ Manifest - uses: apollographql-gh-actions/rover-persisted-queries-publish@v1 - with: - apollo-key: ${{ secrets.APOLLO_KEY }} - graph-ref: ${{ secrets.APOLLO_GRAPH_ID }}@${{ inputs.environment }} - manifest: ./client/persisted-query-manifest.json - \ No newline at end of file diff --git a/.github/workflows/_rover-subgraph-check.yml b/.github/workflows/_rover-subgraph-check.yml deleted file mode 100644 index 9faa4f2..0000000 --- a/.github/workflows/_rover-subgraph-check.yml +++ /dev/null @@ -1,42 +0,0 @@ -name: _rover-subgraph-check - -on: - workflow_call: - inputs: - subgraph_name: - description: Subgraph name in Studio - type: string - required: true - variant: - description: Apollo Studio variant - type: string - required: true - secrets: - APOLLO_KEY: - required: true - APOLLO_GRAPH_ID: - required: true - -env: - APOLLO_KEY: ${{ secrets.APOLLO_KEY }} - APOLLO_VCS_COMMIT: ${{ github.event.pull_request.head.sha }} - -jobs: - check: - name: Rover Subgraph Check - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Install Rover - uses: apollographql-gh-actions/install-rover@v1 - - - name: Rover Subgraph Check - uses: apollographql-gh-actions/rover-subgraph-check@v1 - with: - apollo-key: ${{ secrets.APOLLO_KEY }} - graph-ref: ${{ secrets.APOLLO_GRAPH_ID }}@${{ inputs.variant }} - name: ${{ inputs.subgraph_name }} - schema: ./subgraphs/${{inputs.subgraph_name}}/schema.graphql diff --git a/.github/workflows/_run-loadtest-aws.yaml b/.github/workflows/_run-loadtest-aws.yaml deleted file mode 100644 index 1e6b65d..0000000 --- a/.github/workflows/_run-loadtest-aws.yaml +++ /dev/null @@ -1,59 +0,0 @@ -name: _run-loadtest-aws - -on: - workflow_call: - inputs: - test: - description: Test to Run - type: string - required: true - parallelism: - description: Number of workers - type: string - default: "1" - required: true - secrets: - AWS_ACCESS_KEY: - required: true - AWS_SECRET_KEY: - required: true - CLUSTER_PREFIX: - required: true -env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY }} - AWS_REGION: us-east-1 - CLUSTER_PREFIX: ${{ secrets.CLUSTER_PREFIX }} - -jobs: - deploy: - name: test - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: "Configure Kubeconfig w/ EKS" - run: aws eks update-kubeconfig --name ${{ secrets.CLUSTER_PREFIX }}-prod --region ${{ 
env.AWS_REGION }} - - - name: "Apply K6" - env: - RUN_NAME: run-${{ inputs.test }}-${{ github.run_id }} - run: | - cat <> "$GITHUB_OUTPUT" - npm-build: - runs-on: ubuntu-latest - needs: define-matrix - strategy: - matrix: - subgraphs: ${{ fromJson(needs.define-matrix.outputs.subgraphs) }} - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-node@v4 - with: - node-version: 22.x - cache: "npm" - - - run: npm ci - - run: npm run build -w subgraphs/${{ matrix.subgraphs }} --if-present - diff --git a/.github/workflows/pr-check-deploy.yaml b/.github/workflows/pr-check-deploy.yaml deleted file mode 100644 index 9268f02..0000000 --- a/.github/workflows/pr-check-deploy.yaml +++ /dev/null @@ -1,18 +0,0 @@ -name: Pull Request Check Deploy on Subgraph Chart - -on: - pull_request: - paths: - - "deploy/**" - -jobs: - # See GH Action: https://github.com/helm/chart-testing-action - helm-lint-test: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - name: Helm lint - id: list-changed - run: helm lint ./deploy/subgraph diff --git a/.github/workflows/run-load-test.yaml b/.github/workflows/run-load-test.yaml deleted file mode 100644 index 545fea3..0000000 --- a/.github/workflows/run-load-test.yaml +++ /dev/null @@ -1,44 +0,0 @@ -name: Run Load Test - -# REQUIRED REPO SECRETS -# - GCP_CREDENTIALS -# - CLUSTER_PREFIX - -on: - workflow_dispatch: - inputs: - test: - description: Test to Run - type: choice - required: true - options: - - short - - long - parallelism: - description: Number of workers - type: string - default: "1" - required: true - -jobs: - determine_cloud_provider: - uses: ./.github/workflows/_determine-provider.yaml - secrets: inherit - - deploy_gcp: - if: needs.determine_cloud_provider.outputs.gcp == 'true' - needs: [determine_cloud_provider] - uses: ./.github/workflows/_run-loadtest-gke.yaml - secrets: inherit - with: - test: ${{ inputs.test }} - parallelism: ${{ inputs.parallelism }} - - deploy_aws: - if: needs.determine_cloud_provider.outputs.aws == 'true' - needs: [determine_cloud_provider] - uses: ./.github/workflows/_run-loadtest-aws.yaml - secrets: inherit - with: - test: ${{ inputs.test }} - parallelism: ${{ inputs.parallelism }} diff --git a/.github/workflows/uninstall-router.yaml b/.github/workflows/uninstall-router.yaml deleted file mode 100644 index 542b8ef..0000000 --- a/.github/workflows/uninstall-router.yaml +++ /dev/null @@ -1,42 +0,0 @@ -name: Uninstall Router - -# REQUIRED REPO SECRETS -# - GCP_CREDENTIALS -# - CLUSTER_PREFIX - -on: - workflow_dispatch: - -jobs: - determine_cloud_provider: - uses: ./.github/workflows/_determine-provider.yaml - secrets: inherit - - uninstall_dev_gcp: - if: needs.determine_cloud_provider.outputs.gcp == 'true' - needs: [determine_cloud_provider] - uses: ./.github/workflows/_uninstall-router-gke.yaml - secrets: inherit - with: - environment: dev - uninstall_prod_gcp: - if: needs.determine_cloud_provider.outputs.gcp == 'true' - needs: [determine_cloud_provider] - uses: ./.github/workflows/_uninstall-router-gke.yaml - secrets: inherit - with: - environment: prod - uninstall_dev_aws: - if: needs.determine_cloud_provider.outputs.aws == 'true' - needs: [determine_cloud_provider] - uses: ./.github/workflows/_uninstall-router-aws.yaml - secrets: inherit - with: - environment: dev - uninstall_prod_aws: - if: needs.determine_cloud_provider.outputs.aws == 'true' - needs: [determine_cloud_provider] - uses: ./.github/workflows/_uninstall-router-aws.yaml - secrets: inherit - with: - environment: prod \ No newline at end of file 
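The dispatch workflows removed above (deploy, load test, uninstall) all gate their jobs on the outputs of `_determine-provider`, which does nothing more than report whether each provider's credential secret is non-empty. A minimal sketch of that check, reusing the variable names the workflow reads (running it locally rather than in Actions is an assumption for illustration):

```bash
# Sketch of the provider-detection logic from the deleted _determine-provider workflow:
# a provider counts as "enabled" when its credential variable is non-empty.
GCP="${GCP_CREDENTIALS:-}"
AWS="${AWS_ACCESS_KEY:-}"
APOLLO="${APOLLO_KEY:-}"

emit() {
  # $1 = output name, $2 = credential value
  if [ -n "$2" ]; then
    echo "$1=true"
  else
    echo "$1=false"
  fi
}

# In GitHub Actions these lines are appended to $GITHUB_OUTPUT; locally they just print.
{
  emit gcp "$GCP"
  emit aws "$AWS"
  emit apollo "$APOLLO"
} >> "${GITHUB_OUTPUT:-/dev/stdout}"
```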
diff --git a/README.md b/README.md index 9130016..7d6992c 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,17 @@ # Apollo Federation Supergraph Architecture -This repository contains a reference architecture utilizing [Kubernetes](https://kubernetes.io/docs/concepts/overview/) when using [Apollo Federation](https://www.apollographql.com/docs/federation/). It uses GitHub Actions configured to automate most of the deployment processes for the router, subgraphs, and client, along with minimal observability tooling available to be able to appropriately load test the resulting environment. +This repository contains a reference architecture utilizing [Kubernetes](https://kubernetes.io/docs/concepts/overview/) when using [Apollo Federation](https://www.apollographql.com/docs/federation/). It is designed to run locally on [Minikube](https://minikube.sigs.k8s.io/) for development and testing purposes. Once the architecture is fully stood up, you'll have: - An Apollo Router running and managed by the [Apollo GraphOS Operator](https://www.apollographql.com/docs/apollo-operator/), utilizing: - - [Persisted Queries for safelisting operations](https://www.apollographql.com/docs/router/configuration/persisted-queries/#differences-from-automatic-persisted-queries) - - [A coprocessor for handling customizations outside of the router](https://www.apollographql.com/docs/router/customizations/coprocessor) - - [Rhai scripts to do basic customizations within the router container](https://www.apollographql.com/docs/router/customizations/rhai) + - [A coprocessor for handling customizations outside of the router](https://www.apollographql.com/docs/router/customizations/coprocessor) (tracked, not deployed yet) + - [Rhai scripts to do basic customizations within the router container](https://www.apollographql.com/docs/router/customizations/rhai) (tracked, not deployed yet) - [Authorization/Authentication directives](https://www.apollographql.com/docs/router/configuration/authorization) -- Eight subgraphs, each handling a portion of the overall supergraph schema, with schemas automatically published to GraphOS via the operator -- A React-based frontend application utilizing Apollo Client +- Eight subgraphs, each handling a portion of the overall supergraph schema, with schemas automatically published to GraphOS via the operator using inline SDL +- A React-based frontend application utilizing Apollo Client (optional) - Apollo GraphOS Operator for automated schema publishing, composition, and deployment -- Tools to run k6 load tests against the architecture from within the same cluster +- Step-by-step scripts for easy local setup and deployment ### The ending architecture @@ -23,27 +22,31 @@ Once the architecture is fully stood up, you'll have: At a minimum, you will need: -- A Github account. -- An enterprise Apollo GraphOS account. +- [Minikube](https://minikube.sigs.k8s.io/docs/start/) installed and configured +- [kubectl](https://kubernetes.io/docs/tasks/tools/) installed +- [Helm](https://helm.sh/docs/intro/install/) installed +- [Docker](https://docs.docker.com/get-docker/) installed +- [jq](https://stedolan.github.io/jq/download/) installed +- [curl](https://curl.se/) installed +- An [Apollo GraphOS account](https://studio.apollographql.com/signup) with a Personal API key - You can use [a free enterprise trial account](https://studio.apollographql.com/signup?type=enterprise-trial) if you don't have an enterprise contract. -- An account for either: - - Google Cloud Platform (GCP). - - Amazon Web Services (AWS). 
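The reworked prerequisites above swap cloud accounts for local tooling, so before following the setup guide it can save time to confirm every required CLI is on the PATH. A small convenience check, not part of the repository:

```bash
# Verify the tools listed in the prerequisites are installed.
for tool in minikube kubectl helm docker jq curl; do
  if command -v "$tool" >/dev/null 2>&1; then
    echo "ok:      $tool"
  else
    echo "missing: $tool"
  fi
done
```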
-Further requirements are noted within the [setup instructions](./docs/setup.md) as each type of environment (cloud vs. local) requires additional tooling. +Further requirements and detailed setup instructions are available in the [setup guide](./docs/setup.md). ## Contents -- ⏱ estimated time: 1 hour 15 minutes -- 💰 estimated cost (if using a cloud provider): $10-$15 +- ⏱ estimated time: 30 minutes +- 💰 estimated cost: $0 (runs locally on your machine) ### [Setup](/docs/setup.md) During setup, you'll be: -- Gathering accounts and credentials -- Provisioning resources -- Deploying the applications, including router, subgraphs, client, and observability tools +- Installing and configuring Minikube +- Creating an Apollo GraphOS graph and variants +- Setting up the Kubernetes cluster and Apollo GraphOS Operator +- Building Docker images locally +- Deploying subgraphs, router, and client using step-by-step scripts ### [Operator Guide](/docs/operator-guide.md) diff --git a/deploy/client/values.yaml b/deploy/client/values.yaml index 48ab896..ddcd1cd 100644 --- a/deploy/client/values.yaml +++ b/deploy/client/values.yaml @@ -1,9 +1,9 @@ replicaCount: 3 image: - repository: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/client - pullPolicy: Always - tag: main + repository: client + pullPolicy: Never + tag: local nameOverride: web fullnameOverride: web @@ -11,10 +11,6 @@ fullnameOverride: web serviceAccount: create: false -# Image pull secrets for GitHub Container Registry -imagePullSecrets: - - name: ghcr-secret - podAnnotations: {} securityContext: {} diff --git a/deploy/coprocessor/values.yaml b/deploy/coprocessor/values.yaml index e640e57..a2a13b3 100644 --- a/deploy/coprocessor/values.yaml +++ b/deploy/coprocessor/values.yaml @@ -3,9 +3,9 @@ namespace: apollo replicaCount: 3 image: - repository: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/coprocessor - pullPolicy: Always - tag: main + repository: coprocessor + pullPolicy: Never + tag: local nameOverride: coprocessor fullnameOverride: coprocessor @@ -13,10 +13,6 @@ fullnameOverride: coprocessor serviceAccount: create: false -# Image pull secrets for GitHub Container Registry -imagePullSecrets: - - name: ghcr-secret - podAnnotations: {} securityContext: {} diff --git a/deploy/operator-resources/apply-resources.sh b/deploy/operator-resources/apply-resources.sh index 3967579..56d2f89 100755 --- a/deploy/operator-resources/apply-resources.sh +++ b/deploy/operator-resources/apply-resources.sh @@ -2,36 +2,92 @@ set -euo pipefail # This script applies the operator resources with the correct graph ID -# Usage: ./apply-resources.sh {dev|prod} +# Usage: ./apply-resources.sh [environment] +# Environment defaults to "dev" if not specified ENVIRONMENT=${1:-dev} -if [[ "$ENVIRONMENT" != "dev" && "$ENVIRONMENT" != "prod" ]]; then - echo "Error: Environment must be 'dev' or 'prod'" - exit 1 +# Check if APOLLO_GRAPH_ID is set (load from .env if available) +if [ -f .env ]; then + source .env fi -# Check if TF_VAR_apollo_graph_id is set -if [[ -z "${TF_VAR_apollo_graph_id:-}" ]]; then - echo "Error: TF_VAR_apollo_graph_id is not set. Please source .env file from your terraform directory." - exit 1 +if [[ -z "${APOLLO_GRAPH_ID:-}" ]]; then + echo "Error: APOLLO_GRAPH_ID is not set. Please source .env file or set it as an environment variable." 
+ exit 1 fi -echo "Deploying operator resources for ${ENVIRONMENT} environment with graph ID: ${TF_VAR_apollo_graph_id}" +echo "Deploying operator resources for ${ENVIRONMENT} environment with graph ID: ${APOLLO_GRAPH_ID}" -# Apply SupergraphSchema with graph ID substitution -if command -v envsubst &> /dev/null; then - envsubst < "supergraphschema-${ENVIRONMENT}.yaml" | kubectl apply -f - +RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" + +# Apply SupergraphSchema +cat < - -### Gather accounts +This guide will walk you through setting up the Apollo Federation Supergraph reference architecture on Minikube for local development. -- [Github](https://github.com/signup) -- [Apollo GraphOS](https://studio.apollographql.com/signup?referrer=reference-architecture) -- If using a cloud provider: - - [Google Cloud](https://console.cloud.google.com/freetrial) - - Must have a project [with billing enabled](https://cloud.google.com/resource-manager/docs/creating-managing-projects#gcloud) - - [AWS](https://signin.aws.amazon.com/signin) with billing enabled - -### Gather credentials +- [Setup](#setup) + - [Prerequisites](#prerequisites) + - [Step 1: Install Minikube and Dependencies](#step-1-install-minikube-and-dependencies) + - [Step 2: Configure Environment Variables](#step-2-configure-environment-variables) + - [Step 3: Run Setup Scripts](#step-3-run-setup-scripts) + - [Step 4: Access Your Supergraph](#step-4-access-your-supergraph) + - [Creating Additional Environments](#creating-additional-environments) -#### GCP +## Prerequisites -- Google Cloud project ID -- [Github personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) - - [Settings > Developer Settings > Personal Access Tokens](https://github.com/settings/tokens) - - Grant it permissions to the following scopes: - - `repo` (for creating repos) - - `delete_repo` (for cleanup at the end) -- [Apollo GraphOS Personal API key](https://studio.apollographql.com/user-settings/api-keys) +Before you begin, ensure you have: -#### AWS +- [Minikube](https://minikube.sigs.k8s.io/docs/start/) installed and configured +- [kubectl](https://kubernetes.io/docs/tasks/tools/) installed +- [Helm](https://helm.sh/docs/intro/install/) installed +- [Docker](https://docs.docker.com/get-docker/) installed +- [jq](https://stedolan.github.io/jq/download/) installed +- [curl](https://curl.se/) installed +- An [Apollo GraphOS account](https://studio.apollographql.com/signup) with a Personal API key -- [AWS Access Key and Secret for use with the AWS CLI*](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) - - Additionally, ensure you either: - - Set the default region during the AWS CLI configuration - - Set the `AWS_REGION` environment variable when running commands -- [Github personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) - - [Settings > Developer Settings > Personal Access Tokens](https://github.com/settings/tokens) - - Grant it permissions to the following scopes: - - `repo` (for creating repos) - - `delete_repo` (for cleanup at the end) -- [Apollo GraphOS Personal API key](https://studio.apollographql.com/user-settings/api-keys) +### Get Your Apollo GraphOS Personal API Key -\* Please note to use an account with Administrator privileges, or at minimum, the ability to run: +1. Go to [Apollo GraphOS Studio](https://studio.apollographql.com) +2. 
Navigate to [User Settings > API Keys](https://studio.apollographql.com/user-settings/api-keys) +3. Create a new Personal API key or use an existing one +4. Copy the API key value -* Terraform, which creates: - * IAM user and policy - * EKS cluster and node groups, and associates IAM permissions to Kubernetes service accounts - * VPC and subnets +## Step 1: Install Minikube and Dependencies -### Export all necessary variables +### Install Minikube -First, change directories in the cloud provider you wish to use. All Terraform is within the `terraform` root level folder, with each provider having a subfolder within. For the below examples, we'll assume GCP, however the others will use the same commands. +Follow the [Minikube installation guide](https://minikube.sigs.k8s.io/docs/start/) for your operating system. -Next, make a copy of `.env.sample` called `.env` to keep track of these values. You can run `source .env` to reload all environment variables in a new terminal session. +### Verify Installation -```sh -# in either terraform/aws or terraform/gcp -cp .env.sample .env +```bash +minikube version +kubectl version --client +helm version +docker --version ``` -Edit the new `.env` file: - -```sh -export PROJECT_ID="" # if using AWS, you will not see this line and can omit this -export APOLLO_KEY="" -export GITHUB_ORG="" # (not a git URL, just the username/org name) -export TF_VAR_github_token="" -``` +## Step 2: Configure Environment Variables -Run this script to create your graph and get environment variables for GraphOS: +1. Copy the environment template: -```sh -# in the respective terraform/ folder -source .env -./create_graph.sh +```bash +cp scripts/minikube/.env.sample .env ``` -**Note:** With the Apollo GraphOS Operator, this script no longer publishes subgraph schemas to GraphOS. The schemas will be automatically published by the operator when Subgraph CRDs are deployed. - -The script adds a few more environment variables to `.env`, so reload your environment using: +2. Edit `.env` and set your Apollo GraphOS Personal API key and environment: -```sh -source .env +```bash +export APOLLO_KEY="your-apollo-personal-api-key" +export ENVIRONMENT="dev" # Required: e.g., "dev", "prod", "staging" ``` -### Run setup commands +The `ENVIRONMENT` variable is required and allows you to create multiple environments. Each environment will have its own Apollo GraphOS variant. -#### GCP +**Note:** When deploying subgraphs, the scripts will look for environment-specific values files at `subgraphs/{subgraph}/deploy/environments/${ENVIRONMENT}.yaml`. If this file exists, it will be used to override the default `values.yaml`. If it doesn't exist, the default `values.yaml` will be used. The repository includes `dev.yaml` and `prod.yaml` files for all subgraphs. If you create a custom environment name, you can optionally create matching values files for environment-specific configurations. 
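The note above describes how the deploy step picks a values file per subgraph; the logic it refers to lives in `05-deploy-subgraphs.sh` and boils down to a simple fallback. A sketch of that behaviour for a single subgraph (`checkout` is only an example; the script loops over all eight):

```bash
ENVIRONMENT="${ENVIRONMENT:?set ENVIRONMENT in .env}"
subgraph=checkout   # example; 05-deploy-subgraphs.sh iterates over every subgraph

# Prefer the environment-specific values file, fall back to the chart default.
VALUES_FILE="subgraphs/${subgraph}/deploy/environments/${ENVIRONMENT}.yaml"
if [ ! -f "$VALUES_FILE" ]; then
  VALUES_FILE="subgraphs/${subgraph}/deploy/values.yaml"
fi

helm upgrade --install "$subgraph" "subgraphs/${subgraph}/deploy" \
  --namespace "$subgraph" --create-namespace \
  -f "$VALUES_FILE"
```

Custom environment names therefore work without any extra files; they simply fall through to the default `values.yaml`.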
-```sh -gcloud components update -gcloud components install gke-gcloud-auth-plugin -gcloud auth login +## Step 3: Run Setup Scripts -gcloud config set project ${PROJECT_ID} -gcloud services enable \ - container.googleapis.com \ - secretmanager.googleapis.com \ - cloudasset.googleapis.com \ - storage.googleapis.com -gh auth login -``` +Run the scripts in order from the repository root: -#### AWS +### Script 01: Setup Minikube Cluster -```sh -aws configure -gh auth login +```bash +./scripts/minikube/01-setup-minikube.sh ``` +This script: +- Starts or creates a Minikube cluster +- Enables the ingress addon for external access +- Configures kubectl to use the Minikube context - - -#### General - -
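Once `01-setup-minikube.sh` has run, a few read-only commands confirm the cluster, the ingress addon, and the kubectl context are in the state the later scripts assume (verification only, not part of the script):

```bash
minikube status                       # cluster should report Running
minikube addons list | grep ingress   # ingress addon should be enabled
kubectl config current-context        # should print "minikube"
kubectl get pods -n ingress-nginx     # ingress controller pods should be starting
```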
- Optional: how do I specify a different name for clusters and repos? (The default is "apollo-supergraph-k8s".) - -Before running `create_graph.sh`, `setup_clusters.sh`, or `terraform apply` export the prefix as as environment variables: - -```sh -export CLUSTER_PREFIX=my-custom-prefix -export TF_VAR_demo_name=$CLUSTER_PREFIX +```bash +./scripts/minikube/02-setup-apollo-graph.sh ``` -
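Script 02 refuses to run without `APOLLO_KEY` and `ENVIRONMENT`, so a quick pre-flight check avoids a partial run. This is only a convenience wrapper around the validation the script already performs:

```bash
# Fail fast if the variables 02-setup-apollo-graph.sh validates are missing.
source .env
: "${APOLLO_KEY:?APOLLO_KEY must be set in .env}"
: "${ENVIRONMENT:?ENVIRONMENT must be set in .env}"

./scripts/minikube/02-setup-apollo-graph.sh
```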
- -## Part B: Provision resources - -
- Have you run this tutorial before? - -You may need to clean up your Github packages before creating new repos of the same name. Visit `https://github.com/?tab=packages` and delete the packages created by the previous versions of the repos. +This script: +- Creates an Apollo GraphOS graph +- Creates an Operator API key +- Creates a variant for your environment +- Saves configuration to `.env` -
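After `02-setup-apollo-graph.sh` finishes, the generated credentials are appended to `.env`. Checking that they are present, without printing the secret values, and assembling the graph ref used by the later steps looks roughly like this:

```bash
source .env
# 02-setup-apollo-graph.sh appends these exports to .env.
for var in APOLLO_GRAPH_ID APOLLO_KEY OPERATOR_KEY; do
  if [ -n "${!var:-}" ]; then
    echo "$var is set"
  else
    echo "$var is MISSING"
  fi
done
echo "graph ref: ${APOLLO_GRAPH_ID}@${ENVIRONMENT}"
```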
+**Note:** Make sure your `.env` file has `APOLLO_KEY` set before running this script. -### Create Kubernetes clusters, basic infrastructure, and Github repositories +### Script 03: Setup Kubernetes Cluster -**Note: If using a cloud provider, the following commands will create resources on your cloud provider account and begin to accrue a cost.** The reference infrastructure defaults to a lower-cost environment (small node count and instance size), however it will not be covered by either of GCP's or AWS's free tiers. - - - -```sh -# for example, if using GCP -cd terraform/gcp -terraform init # takes about 2 minutes -terraform apply # will print plan then prompt for confirmation -# takes about 10-15 minutes +```bash +source .env # Load the variables set by script 02 +./scripts/minikube/03-setup-cluster.sh ``` -**Note**: If using GCP, you might get an `Invalid provider configuration (no credentials loaded)` error when running `terraform apply`, please run `gcloud auth application-default login` and try again. +This script: +- Creates required namespaces (`apollo-operator`, `apollo`) +- Creates the operator API key secret +- Installs the Apollo GraphOS Operator via Helm -Expected output: +### Script 04: Build Docker Images +```bash +./scripts/minikube/04-build-images.sh ``` -kubernetes_cluster_names = { - "dev" = "apollo-supergraph-k8s-dev" - "prod" = "apollo-supergraph-k8s-prod" -} -repo = "https://github.com/you/reference-architecture" -``` - -
- What does this do? - -Terraform provisions: - -- Two Kubernetes clusters (dev and prod) -- The GitHub repository (`/reference-architecture`) -- GitHub action secrets for GCP/AWS and Apollo credentials - -The subgraph repos are configured to build and deploy to the `dev` cluster once they're provisioned. (The deploy will fail the first time. See "Note about "initial commit" errors" below.) - -
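Script 04 works because the images are built inside Minikube's Docker daemon, which is what lets the charts use `pullPolicy: Never` with the `:local` tags. Building a single subgraph by hand and confirming the cluster can see it is a useful way to understand the mechanism (`checkout` is only an example):

```bash
eval "$(minikube docker-env)"                 # point the docker CLI at Minikube's daemon
docker build -t checkout:local subgraphs/checkout
docker images | grep local                    # the checkout image should carry the local tag
```

Skipping the `eval` line builds the image on the host daemon instead, and the cluster will not find it.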
-### Run cluster setup script +This script: +- Configures Docker to use Minikube's Docker daemon +- Builds all subgraph images locally +- Tags images as `{subgraph}:local` +- Builds coprocessor and client images (for future use) -After creating the necessary clusters, you will need to run the included cluster setup script: +### Script 05: Deploy Subgraphs -```sh -# for example, if using GCP -cd terraform/gcp -./setup_clusters.sh # takes about 2 minutes +```bash +./scripts/minikube/05-deploy-subgraphs.sh ``` -
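Beyond `kubectl get`, it can help to wait for each subgraph Deployment to become available and then inspect the Subgraph resource when something looks stuck. A monitoring sketch: the namespace list matches the eight subgraphs in this repository, and the resource name for `checkout` is assumed to follow the same per-subgraph pattern.

```bash
for ns in checkout discovery inventory orders products reviews shipping users; do
  kubectl wait --for=condition=Available deployment --all -n "$ns" --timeout=120s
done

# If a subgraph never registers, its Subgraph resource usually explains why.
kubectl get subgraphs --all-namespaces
kubectl describe subgraph checkout -n checkout   # example subgraph
```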
- What does this do? - -For both `dev` and `prod` clusters: - -- Configures your local `kubectl` environment so you can inspect your clusters -- For GCP users: - - Configures namespace, service account, and role bindings for Open Telemetry and Google Traces. -- For AWS users: - - Configures load balancer controller policy and IAM service account -- **New**: Installs the Apollo GraphOS Operator via Helm -- **New**: Creates the `apollo-operator` and `apollo` namespaces -- **New**: Creates the operator API key secret - - -
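To confirm the operator pieces described here are actually in place (the namespaces, the API-key secret, and the operator release itself), a few read-only checks suffice; the names match those used by `03-setup-cluster.sh`:

```bash
kubectl get namespace apollo-operator apollo
kubectl get secret apollo-api-key -n apollo-operator
helm status apollo-operator -n apollo-operator
kubectl get pods -n apollo-operator
```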
- -**Note**: The `create_graph.sh` script automatically creates an Operator API key using the Platform API. This key is exported as `OPERATOR_KEY` in your `.env` file and will be used by `setup_clusters.sh` to configure the operator secret. - -After this completes, you're ready to deploy your subgraphs! - -## Part C: Deploy applications - - - -### Deploy subgraphs to dev - -**Note:** The image pull secret is automatically created by `setup_clusters.sh` if `TF_VAR_github_token` and `GITHUB_ORG` are set. `GITHUB_ORG` must be your GitHub username or organization name (e.g., `andywgarcia`), not a git URL. If you need to create it manually, use the commands below with your GitHub username and token. - -Deploy the subgraph services and register them with the operator: - -```sh -# Deploy each subgraph service -for subgraph in checkout discovery inventory orders products reviews shipping users; do - kubectl create namespace $subgraph --dry-run=client -o yaml | kubectl apply -f - - - # Copy the image pull secret to each namespace (if it exists) - if kubectl get secret ghcr-secret -n default &>/dev/null; then - kubectl get secret ghcr-secret -n default -o yaml | \ - sed 's/namespace: default/namespace: '"$subgraph"'/' | \ - kubectl apply -f - - fi - - # Install (imagePullSecrets are configured in values.yaml) - helm install $subgraph subgraphs/$subgraph/deploy \ - -f subgraphs/$subgraph/deploy/environments/dev.yaml \ - -n $subgraph - - kubectl apply -f subgraphs/$subgraph/k8s/subgraph-dev.yaml -done -``` +This script: +- Deploys each subgraph using Helm charts +- Creates Subgraph CRDs with inline SDL schemas +- Configures images to use local builds -The operator will automatically publish schemas to GraphOS and trigger composition. You can monitor the progress: +Monitor subgraph deployment: -```sh -# Check if subgraphs are registered +```bash kubectl get subgraphs --all-namespaces - +kubectl get pods --all-namespaces ``` -You can try out a subgraph using port forwarding: +### Script 06: Deploy Operator Resources -```sh -kubectl port-forward service/graphql -n checkout 4001:4001 +```bash +./scripts/minikube/06-deploy-operator-resources.sh ``` -Then visit [http://localhost:4001/](http://localhost:4001/). - -### Deploy subgraphs to prod - -**Note:** The image pull secret is automatically created by `setup_clusters.sh` if `TF_VAR_github_token` and `GITHUB_ORG` are set. `GITHUB_ORG` must be your GitHub username or organization name (e.g., `andywgarcia`), not a git URL. If you need to create it manually, use the commands below with your GitHub username and token. 
- -Deploy the subgraphs to production: - -```sh -# Deploy each subgraph service -for subgraph in checkout discovery inventory orders products reviews shipping users; do - kubectl create namespace $subgraph --dry-run=client -o yaml | kubectl apply -f - - - # Copy the image pull secret to each namespace (if it exists) - if kubectl get secret ghcr-secret -n default &>/dev/null; then - kubectl get secret ghcr-secret -n default -o yaml | \ - sed 's/namespace: default/namespace: '"$subgraph"'/' | \ - kubectl apply -f - - fi - - # Install (imagePullSecrets are configured in values.yaml) - helm install $subgraph subgraphs/$subgraph/deploy \ - -f subgraphs/$subgraph/deploy/environments/prod.yaml \ - -n $subgraph - - kubectl apply -f subgraphs/$subgraph/k8s/subgraph-prod.yaml -done -``` +This script: +- Deploys SupergraphSchema CRD (triggers composition) +- Deploys Supergraph CRD (deploys the Apollo Router) +- Waits for the router to be ready -Monitor the deployment: +Monitor router deployment: -```sh -# Check if subgraphs are registered -kubectl get subgraphs --all-namespaces +```bash +kubectl get supergraphs -n apollo +kubectl get pods -n apollo +kubectl describe supergraph reference-architecture-${ENVIRONMENT} -n apollo +``` +### Script 07: Deploy Ingress + +```bash +./scripts/minikube/07-deploy-ingress.sh ``` -You've successfully deployed your subgraphs! The next step is to deploy the Apollo Router and Coprocessor. +This script: +- Deploys an Ingress resource for external access +- Provides access URLs for the router +### Script 08: Deploy Client (Optional) -### Deploy the coprocessor and router +```bash +./scripts/minikube/08-deploy-client.sh +``` -Deploy the coprocessor first: +This script: +- Builds and deploys the client application +- Sets up ingress for client access -```sh -# Deploy to dev (with envsubst for variable substitution) -kubectx apollo-supergraph-k8s-dev -if command -v envsubst &> /dev/null; then - envsubst < deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo -else - sed "s|\${GITHUB_ORG}|${GITHUB_ORG:-apollosolutions}|g" deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo -fi +## Step 4: Access Your Supergraph -# Deploy to prod -kubectx apollo-supergraph-k8s-prod -if command -v envsubst &> /dev/null; then - envsubst < deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo -else - sed "s|\${GITHUB_ORG}|${GITHUB_ORG:-apollosolutions}|g" deploy/coprocessor/values.yaml | helm install coprocessor deploy/coprocessor -f - -n apollo -fi -``` +After running all scripts, you can access your supergraph in several ways: -Once the coprocessor is deployed, deploy the router using the operator Supergraph CRDs: +### Option 1: Using Ingress IP -**Note:** Make sure you've sourced the `.env` file from your terraform directory first to set `TF_VAR_apollo_graph_id`: +If ingress is configured, get the IP: -```sh -cd terraform/gcp # or terraform/aws, terraform/minikube -source .env -cd ../.. +```bash +kubectl get ingress router -n apollo ``` -Then deploy the operator resources: +Then access at `http://` -```sh -# Deploy to dev -kubectx apollo-supergraph-k8s-dev -cd deploy/operator-resources -./apply-resources.sh dev -cd ../.. +### Option 2: Using Minikube Service -# Deploy to prod -kubectx apollo-supergraph-k8s-prod -cd deploy/operator-resources -./apply-resources.sh prod -cd ../.. 
+```bash +minikube service reference-architecture-${ENVIRONMENT} -n apollo ``` -Or manually apply with kubectl: +This will open the router in your default browser. -```sh -# Deploy to dev (with envsubst for variable substitution) -kubectx apollo-supergraph-k8s-dev -envsubst < deploy/operator-resources/supergraphschema-dev.yaml | kubectl apply -f - -kubectl apply -f deploy/operator-resources/supergraph-dev.yaml -kubectl apply -f deploy/operator-resources/ingress-dev.yaml +### Option 3: Using Port Forwarding -# Deploy to prod -kubectx apollo-supergraph-k8s-prod -envsubst < deploy/operator-resources/supergraphschema-prod.yaml | kubectl apply -f - -kubectl apply -f deploy/operator-resources/supergraph-prod.yaml -kubectl apply -f deploy/operator-resources/ingress-prod.yaml +```bash +kubectl port-forward service/reference-architecture-${ENVIRONMENT} -n apollo 4000:80 ``` -The operator will automatically deploy the router based on the composed supergraph schema. You can monitor the deployment: +Then access at `http://localhost:4000` -```sh -# Check router deployment status -kubectl get supergraphs -n apollo +### Verify Router is Working -# Check router pods -kubectl get pods -n apollo +Test the router health endpoint: -# Describe the supergraph to see conditions -kubectl describe supergraphs reference-architecture-prod -n apollo +```bash +curl http://localhost:4000/.well-known/apollo/server-health ``` -Once deployed, an ingress will be created to access the router. In the case of AWS, it will be a domain name, and in the case of GCP, it'll be an IP. - -Follow the below instructions for your cloud provider you are using. Please note that for both providers, the value for the ingress may take some time to become live, so you may need to give it a few minutes to process. +Or visit the router in Apollo Studio: +1. Go to [Apollo GraphOS Studio](https://studio.apollographql.com) +2. Select your graph +3. Navigate to the variant (e.g., "dev") +4. View the router status and metrics -#### GCP +## Creating Additional Environments -```sh -kubectx apollo-supergraph-k8s-prod -ROUTER_HOSTNAME=http://$(kubectl get ingress -n apollo -o jsonpath="{.*.*.status.loadBalancer.ingress.*.ip}") -open $ROUTER_HOSTNAME -``` +To create a new environment (e.g., "prod"): -#### AWS +1. Set the environment variable: -```sh -kubectx apollo-supergraph-k8s-prod -ROUTER_HOSTNAME=$(kubectl get ingress -n apollo -o jsonpath="{.*.*.status.loadBalancer.ingress.*.hostname}") -open http://$ROUTER_HOSTNAME +```bash +export ENVIRONMENT="prod" ``` -Upon running the above commands, you'll have the Router page open and you can make requests against your newly deployed supergraph! +2. Run scripts 02-07 again with the new environment: -**Note**: If using Explorer to run operations, you will need to set the client headers first: -``` -apollographql-client-name:apollo-client -apollographql-client-version:b +```bash +./scripts/minikube/02-setup-apollo-graph.sh # Creates prod variant +source .env +./scripts/minikube/03-setup-cluster.sh # Uses same cluster +./scripts/minikube/04-build-images.sh # Reuses images +./scripts/minikube/05-deploy-subgraphs.sh # Deploys to prod namespaces +./scripts/minikube/06-deploy-operator-resources.sh # Creates prod router +./scripts/minikube/07-deploy-ingress.sh # Updates ingress ``` -### Deploy the client +Each environment will have: +- Its own Apollo GraphOS variant +- Separate Kubernetes resources (namespaces, services, etc.) 
+- Its own router instance -The last step to getting fully configured is to deploy the client to both environments. To do so, we'll need our router ingress URL to point the client to. This can be pulled from the prior commands, so if you are using the same terminal session, feel free to skip the next set of commands. +## Troubleshooting -#### GCP +### Minikube won't start -```sh -kubectx apollo-supergraph-k8s-prod -ROUTER_HOSTNAME=http://$(kubectl get ingress -n apollo -o jsonpath="{.*.*.status.loadBalancer.ingress.*.ip}") +```bash +minikube delete +minikube start ``` -Upon running the above commands, you'll have the Router page open and you can make requests against your newly deployed supergraph! +### Images not found -#### AWS +Ensure script 04 built the images and Docker is using Minikube's daemon: -```sh -kubectx apollo-supergraph-k8s-prod -ROUTER_HOSTNAME=$(kubectl get ingress -n apollo -o jsonpath="{.*.*.status.loadBalancer.ingress.*.hostname}") +```bash +eval $(minikube docker-env) +docker images | grep local ``` -Once you have the router hostname, you'll need to set it as a secret in the GitHub repository created. +### Subgraphs not publishing schemas -```sh - gh variable set BACKEND_URL --body "$ROUTER_HOSTNAME" --repo $GITHUB_ORG/reference-architecture -``` +Check subgraph status: -Lastly, we'll need to deploy the client: - -```sh -gh workflow run "Deploy Client" --repo $GITHUB_ORG/reference-architecture \ - -f environment=prod \ - -f dry-run=false \ - -f debug=false +```bash +kubectl describe subgraph -n ``` -This will create another ingress specific to the client, so much like the router, you can run the following commands depending on your provider. As with the other ingress, this may take a few minutes to become active. +Look for errors in schema extraction or API key authentication. + +### Router not deploying -#### GCP +Check router status: -```sh -kubectx apollo-supergraph-k8s-prod -ROUTER_IP=$(kubectl get ingress -n client -o jsonpath="{.*.*.status.loadBalancer.ingress.*.ip}") -open http://$ROUTER_IP +```bash +kubectl describe supergraph reference-architecture-${ENVIRONMENT} -n apollo +kubectl logs -n apollo deployment/reference-architecture-${ENVIRONMENT} ``` -You should now have the full architecture deployed! +### Ingress not working -#### AWS +Ensure ingress addon is enabled: -```sh -kubectx apollo-supergraph-k8s-prod -ROUTER_HOSTNAME=$(kubectl get ingress -n client -o jsonpath="{.*.*.status.loadBalancer.ingress.*.hostname}") -open http://$ROUTER_HOSTNAME +```bash +minikube addons enable ingress +kubectl get pods -n ingress-nginx ``` -You should now have the full architecture deployed! \ No newline at end of file +## Next Steps + +- Read the [Operator Guide](./operator-guide.md) to understand how the Apollo GraphOS Operator works +- Explore your supergraph in [Apollo Studio](https://studio.apollographql.com) +- Make schema changes and see them automatically composed and deployed diff --git a/scripts/minikube/01-setup-minikube.sh b/scripts/minikube/01-setup-minikube.sh new file mode 100755 index 0000000..5919d19 --- /dev/null +++ b/scripts/minikube/01-setup-minikube.sh @@ -0,0 +1,48 @@ +#!/bin/bash +set -euo pipefail + +# Script 01: Setup Minikube Cluster +# This script installs and starts a Minikube cluster + +echo "=== Step 01: Setting up Minikube Cluster ===" + +# Check if minikube is installed +if ! 
command -v minikube &> /dev/null; then + echo "Error: minikube is not installed" + echo "Please install minikube: https://minikube.sigs.k8s.io/docs/start/" + exit 1 +fi + +# Check if kubectl is installed +if ! command -v kubectl &> /dev/null; then + echo "Error: kubectl is not installed" + echo "Please install kubectl: https://kubernetes.io/docs/tasks/tools/" + exit 1 +fi + +# Check if minikube cluster already exists +if minikube status &> /dev/null; then + echo "Minikube cluster already exists. Starting it..." + minikube start +else + echo "Creating new Minikube cluster..." + minikube start +fi + +# Enable ingress addon for external access +echo "Enabling ingress addon..." +minikube addons enable ingress + +# Verify cluster is running +echo "Verifying cluster status..." +minikube status + +# Configure kubectl to use minikube context +echo "Configuring kubectl context..." +kubectl config use-context minikube + +echo "" +echo "✓ Minikube cluster is ready!" +echo "" +echo "Next step: Run 02-setup-apollo-graph.sh to create your Apollo GraphOS graph" + diff --git a/scripts/minikube/02-setup-apollo-graph.sh b/scripts/minikube/02-setup-apollo-graph.sh new file mode 100755 index 0000000..f4a55d9 --- /dev/null +++ b/scripts/minikube/02-setup-apollo-graph.sh @@ -0,0 +1,176 @@ +#!/bin/bash +set -euo pipefail + +# Script 02: Setup Apollo GraphOS Graph +# This script creates an Apollo GraphOS graph and variants, and generates API keys + +echo "=== Step 02: Setting up Apollo GraphOS Graph ===" + +# Load environment variables from .env if it exists +if [ -f .env ]; then + echo "Loading environment variables from .env..." + source .env +else + echo "Warning: .env file not found. Make sure you've created it with your APOLLO_KEY." + echo "See Step 2 in docs/setup.md for instructions." +fi + +# Set defaults only for variables that have reasonable defaults +CLUSTER_PREFIX=${CLUSTER_PREFIX:-"apollo-supergraph-k8s"} + +# Validate required variables (don't default to empty strings) +if [[ -z "${APOLLO_KEY:-}" ]]; then + echo "Error: APOLLO_KEY is required" + echo "Please set APOLLO_KEY in your .env file or export it:" + echo " export APOLLO_KEY=\"your-apollo-personal-api-key\"" + echo "" + echo "See Step 2 in docs/setup.md for instructions on creating .env" + exit 1 +fi + +if [[ -z "${ENVIRONMENT:-}" ]]; then + echo "Error: ENVIRONMENT is required" + echo "Please set ENVIRONMENT in your .env file or export it:" + echo " export ENVIRONMENT=\"dev\"" + echo "" + echo "See Step 2 in docs/setup.md for instructions on creating .env" + exit 1 +fi + +# Check dependencies +if ! command -v jq &> /dev/null; then + echo "Error: jq is not installed" + echo "Please install jq: https://stedolan.github.io/jq/download/" + exit 1 +fi + +if ! command -v curl &> /dev/null; then + echo "Error: curl is not installed" + exit 1 +fi + +# Get account ID if not provided +if [[ -z "${ACCOUNT_ID:-}" ]]; then + echo "Fetching account ID from Apollo GraphOS..." + ACCOUNT_ARGS=( + --silent + --header "x-api-key: $APOLLO_KEY" + --header 'content-type: application/json' + --header 'apollographql-client-name: reference-architecture' + --url 'https://graphql.api.apollographql.com/api/graphql' + --data '{"query":"{ me { ... 
on User { memberships { permission account { id } } } } }"}' + ) + + if [[ -n "${HEADER:-}" ]]; then + ACCOUNT_ARGS+=(--header "$HEADER") + fi + + ACCOUNT_RESP=$(curl "${ACCOUNT_ARGS[@]}") + ACCOUNT_COUNT=$(echo $ACCOUNT_RESP | jq -r ".data.me.memberships | length") + + if [[ $ACCOUNT_COUNT > 1 ]]; then + echo "Apollo Studio returned multiple accounts. Please select one:" + echo "" + + # Display accounts with numbers + ACCOUNTS=($(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[].account.id")) + for i in "${!ACCOUNTS[@]}"; do + echo " $((i+1)). ${ACCOUNTS[$i]}" + done + echo "" + + # Prompt for selection + while true; do + read -p "Enter the number of the account to use (1-$ACCOUNT_COUNT): " SELECTION + if [[ "$SELECTION" =~ ^[0-9]+$ ]] && [ "$SELECTION" -ge 1 ] && [ "$SELECTION" -le "$ACCOUNT_COUNT" ]; then + ACCOUNT_ID="${ACCOUNTS[$((SELECTION-1))]}" + echo "Selected account: $ACCOUNT_ID" + break + else + echo "Invalid selection. Please enter a number between 1 and $ACCOUNT_COUNT." + fi + done + else + ACCOUNT_ID=$(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[0].account.id") + fi +fi + +echo "Creating graph on account $ACCOUNT_ID..." + +# Create graph +CREATE_ARGS=( + --silent + --header "x-api-key: $APOLLO_KEY" + --header 'content-type: application/json' + --header 'apollographql-client-name: reference-architecture' + --url 'https://graphql.api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation CreateGraph(\$accountId: ID!, \$newServiceId: ID!, \$name: String, \$onboardingArchitecture: OnboardingArchitecture) { newService(accountId: \$accountId, id: \$newServiceId, name: \$name, onboardingArchitecture: \$onboardingArchitecture) { id name apiKeys { token } } }\",\"variables\":{\"accountId\":\"$ACCOUNT_ID\",\"newServiceId\":\"$CLUSTER_PREFIX-$(echo $RANDOM | shasum | head -c 6)\",\"name\":\"Reference Architecture $(date +"%Y-%m-%d")\",\"onboardingArchitecture\":\"SUPERGRAPH\"}}" +) + +if [[ -n "${HEADER:-}" ]]; then + CREATE_ARGS+=(--header "$HEADER") +fi + +CREATE_RESP=$(curl "${CREATE_ARGS[@]}") +IS_SUCCESS=$(echo $CREATE_RESP | jq -r ".data.newService") + +if [[ "$IS_SUCCESS" == "null" ]]; then + echo "Error creating graph" + echo $CREATE_RESP | jq . + exit 1 +fi + +# Extract the actual graph ID and name from the response +GRAPH_ID=$(echo $CREATE_RESP | jq -r ".data.newService.id") +GRAPH_NAME=$(echo $CREATE_RESP | jq -r ".data.newService.name") +GRAPH_KEY=$(echo $CREATE_RESP | jq -r ".data.newService.apiKeys[0].token") + +echo "Created graph: $GRAPH_NAME (ID: $GRAPH_ID)" + +# Create Operator API key +echo "Creating Operator API key..." +CREATE_OPERATOR_KEY_ARGS=( + --silent + --header "x-api-key: $APOLLO_KEY" + --header "apollographql-client-name: reference-architecture" + --header "apollographql-client-version: 1.0" + --header 'content-type: application/json' + --url 'https://api.apollographql.com/api/graphql' + --data "{\"query\":\"mutation CreateOperatorKey(\$name: String!, \$type: GraphOsKeyType!, \$organizationId: ID!) { organization(id: \$organizationId) { createKey(name: \$name, type: \$type) { id keyName expiresAt token } } }\",\"variables\":{\"name\":\"operator\",\"type\":\"OPERATOR\",\"organizationId\":\"$ACCOUNT_ID\"}}" +) + +CREATE_OPERATOR_KEY_RESP=$(curl "${CREATE_OPERATOR_KEY_ARGS[@]}") +OPERATOR_KEY=$(echo $CREATE_OPERATOR_KEY_RESP | jq -r ".data.organization.createKey.token") + +if [[ "$OPERATOR_KEY" == "null" ]]; then + echo "Error creating operator key" + echo $CREATE_OPERATOR_KEY_RESP | jq . 
+ exit 1 +fi + +echo "Operator key created successfully" + +# Save to .env file +ENV_FILE=".env" +if [ ! -f "$ENV_FILE" ]; then + touch "$ENV_FILE" +fi + +echo "" >> "$ENV_FILE" +echo "# Apollo GraphOS Configuration (generated by 02-setup-apollo-graph.sh)" >> "$ENV_FILE" +echo "export APOLLO_GRAPH_ID=\"$GRAPH_ID\"" >> "$ENV_FILE" +echo "export APOLLO_KEY=\"$GRAPH_KEY\"" >> "$ENV_FILE" +echo "export OPERATOR_KEY=\"$OPERATOR_KEY\"" >> "$ENV_FILE" +echo "" >> "$ENV_FILE" + +echo "" +echo "✓ Apollo GraphOS graph created successfully!" +echo "" +echo "Graph: $GRAPH_NAME" +echo "Graph ID: $GRAPH_ID" +echo "Environment: $ENVIRONMENT" +echo "" +echo "Configuration saved to .env file" +echo "" +echo "Next step: Run 03-setup-cluster.sh to setup the Kubernetes cluster" + diff --git a/scripts/minikube/03-setup-cluster.sh b/scripts/minikube/03-setup-cluster.sh new file mode 100755 index 0000000..12ea386 --- /dev/null +++ b/scripts/minikube/03-setup-cluster.sh @@ -0,0 +1,90 @@ +#!/bin/bash +set -euo pipefail + +# Script 03: Setup Kubernetes Cluster +# This script sets up namespaces, installs the Apollo GraphOS Operator, and creates secrets + +echo "=== Step 03: Setting up Kubernetes Cluster ===" + +# Load environment variables from .env if it exists +if [ -f .env ]; then + echo "Loading environment variables from .env..." + source .env +fi + +# Validate required variables +if [[ -z "${ENVIRONMENT:-}" ]]; then + echo "Error: ENVIRONMENT is required" + echo "Please set ENVIRONMENT in your .env file or export it:" + echo " export ENVIRONMENT=\"dev\"" + exit 1 +fi + +if [[ -z "${OPERATOR_KEY:-}" ]]; then + echo "Error: OPERATOR_KEY is not set" + echo "Please run 02-setup-apollo-graph.sh first to generate the operator key" + exit 1 +fi + +# Check if kubectl is available +if ! command -v kubectl &> /dev/null; then + echo "Error: kubectl is not installed" + exit 1 +fi + +# Check if helm is available +if ! command -v helm &> /dev/null; then + echo "Error: helm is not installed" + echo "Please install helm: https://helm.sh/docs/intro/install/" + exit 1 +fi + +# Verify cluster connection +echo "Verifying cluster connection..." +if ! kubectl cluster-info &> /dev/null; then + echo "Error: Cannot connect to Kubernetes cluster" + echo "Please ensure Minikube is running: minikube start" + exit 1 +fi + +# Create namespaces +echo "Creating namespaces..." +kubectl create namespace apollo-operator --dry-run=client -o yaml | kubectl apply -f - +kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - + +# Create operator API key secret +echo "Creating operator API key secret..." +kubectl create secret generic apollo-api-key \ + --from-literal="APOLLO_KEY=$OPERATOR_KEY" \ + -n apollo-operator \ + --dry-run=client -o yaml | kubectl apply -f - + +echo "Operator API key secret created" + +# Install Apollo GraphOS Operator using Helm +echo "Installing Apollo GraphOS Operator..." +helm upgrade --install --atomic apollo-operator \ + oci://registry-1.docker.io/apollograph/operator-chart \ + -n apollo-operator \ + --create-namespace \ + --wait \ + -f - < /dev/null; then + echo "Error: docker is not installed" + exit 1 +fi + +# Check if minikube is running +if ! minikube status &> /dev/null; then + echo "Error: Minikube is not running" + echo "Please run: minikube start" + exit 1 +fi + +# Configure docker to use Minikube's Docker daemon +echo "Configuring Docker to use Minikube's daemon..." 
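+# `minikube docker-env` prints shell exports (typically DOCKER_HOST, DOCKER_CERT_PATH,
+# DOCKER_TLS_VERIFY and MINIKUBE_ACTIVE_DOCKERD) that point the local docker CLI at the
+# Docker daemon inside the Minikube node. Evaluating them means the images built below go
+# straight into the cluster's image cache, which is why the subgraph charts can use the
+# local tag with pullPolicy: Never and no registry push is required.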
+eval $(minikube docker-env) + +# List of subgraphs +SUBGRAPHS=("checkout" "discovery" "inventory" "orders" "products" "reviews" "shipping" "users") + +# Build each subgraph image +for subgraph in "${SUBGRAPHS[@]}"; do + echo "" + echo "Building ${subgraph} image..." + + if [ ! -d "subgraphs/${subgraph}" ]; then + echo "Warning: subgraphs/${subgraph} directory not found, skipping..." + continue + fi + + # Build the image + docker build -t "${subgraph}:local" "subgraphs/${subgraph}" + + if [ $? -eq 0 ]; then + echo "✓ Successfully built ${subgraph}:local" + else + echo "✗ Failed to build ${subgraph}:local" + exit 1 + fi +done + +# Build coprocessor image (for future use) +echo "" +echo "Building coprocessor image..." +if [ -d "coprocessor" ]; then + docker build -t "coprocessor:local" "coprocessor" + if [ $? -eq 0 ]; then + echo "✓ Successfully built coprocessor:local" + else + echo "✗ Failed to build coprocessor:local" + exit 1 + fi +else + echo "Warning: coprocessor directory not found, skipping..." +fi + +# Build client image (for future use) +echo "" +echo "Building client image..." +if [ -d "client" ]; then + docker build -t "client:local" "client" + if [ $? -eq 0 ]; then + echo "✓ Successfully built client:local" + else + echo "✗ Failed to build client:local" + exit 1 + fi +else + echo "Warning: client directory not found, skipping..." +fi + +echo "" +echo "✓ All images built successfully!" +echo "" +echo "Note: Images are loaded into Minikube's Docker daemon" +echo "" +echo "Next step: Run 05-deploy-subgraphs.sh to deploy subgraphs" + diff --git a/scripts/minikube/05-deploy-subgraphs.sh b/scripts/minikube/05-deploy-subgraphs.sh new file mode 100755 index 0000000..74bdda4 --- /dev/null +++ b/scripts/minikube/05-deploy-subgraphs.sh @@ -0,0 +1,109 @@ +#!/bin/bash +set -euo pipefail + +# Script 05: Deploy Subgraphs +# This script deploys all subgraphs using Helm and creates Subgraph CRDs with inline SDL + +echo "=== Step 05: Deploying Subgraphs ===" + +# Load environment variables from .env if it exists +if [ -f .env ]; then + echo "Loading environment variables from .env..." + source .env +fi + +# Validate required variables +if [[ -z "${ENVIRONMENT:-}" ]]; then + echo "Error: ENVIRONMENT is required" + echo "Please set ENVIRONMENT in your .env file or export it:" + echo " export ENVIRONMENT=\"dev\"" + exit 1 +fi + +# Check if kubectl is available +if ! command -v kubectl &> /dev/null; then + echo "Error: kubectl is not installed" + exit 1 +fi + +# Check if helm is available +if ! command -v helm &> /dev/null; then + echo "Error: helm is not installed" + exit 1 +fi + +# Verify cluster connection +if ! kubectl cluster-info &> /dev/null; then + echo "Error: Cannot connect to Kubernetes cluster" + exit 1 +fi + +# List of subgraphs +SUBGRAPHS=("checkout" "discovery" "inventory" "orders" "products" "reviews" "shipping" "users") + +# Deploy each subgraph +for subgraph in "${SUBGRAPHS[@]}"; do + echo "" + echo "Deploying ${subgraph}..." + + # Create namespace + kubectl create namespace "${subgraph}" --dry-run=client -o yaml | kubectl apply -f - + + # Use environment-specific values file if it exists, otherwise use default + VALUES_FILE="subgraphs/${subgraph}/deploy/environments/${ENVIRONMENT}.yaml" + if [ ! -f "$VALUES_FILE" ]; then + VALUES_FILE="subgraphs/${subgraph}/deploy/values.yaml" + fi + + # Verify values file exists + if [ ! 
-f "$VALUES_FILE" ]; then + echo "Error: Values file not found: $VALUES_FILE" + exit 1 + fi + + # Install using Helm + helm upgrade --install "${subgraph}" "subgraphs/${subgraph}/deploy" \ + -f "$VALUES_FILE" \ + -n "${subgraph}" \ + --wait + + # Create Subgraph CRD with inline SDL + echo "Creating Subgraph CRD for ${subgraph}..." + + SCHEMA_FILE="subgraphs/${subgraph}/schema.graphql" + if [ ! -f "$SCHEMA_FILE" ]; then + echo "Error: Schema file not found: $SCHEMA_FILE" + exit 1 + fi + + # Read schema and escape for YAML + SCHEMA_CONTENT=$(cat "$SCHEMA_FILE" | sed 's/^/ /') + + # Create Subgraph CRD YAML + cat < /dev/null; then + echo "Error: kubectl is not installed" + exit 1 +fi + +# Verify cluster connection +if ! kubectl cluster-info &> /dev/null; then + echo "Error: Cannot connect to Kubernetes cluster" + exit 1 +fi + +# Ensure apollo namespace exists +kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - + +# Resource name based on environment +RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" + +# Deploy SupergraphSchema +echo "Deploying SupergraphSchema..." +cat < /dev/null; then + echo "Error: kubectl is not installed" + exit 1 +fi + +# Verify cluster connection +if ! kubectl cluster-info &> /dev/null; then + echo "Error: Cannot connect to Kubernetes cluster" + exit 1 +fi + +# Check if ingress addon is enabled +if ! minikube addons list | grep -q "ingress.*enabled"; then + echo "Enabling ingress addon..." + minikube addons enable ingress + echo "Waiting for ingress controller to be ready..." + sleep 15 +fi + +# Wait for ingress controller pods to be ready +echo "Waiting for ingress controller to be ready..." +kubectl wait --namespace ingress-nginx \ + --for=condition=ready pod \ + --selector=app.kubernetes.io/component=controller \ + --timeout=120s || { + echo "Warning: Ingress controller may not be fully ready" +} + +# Resource name based on environment +RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" + +# Ensure apollo namespace exists +kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - + +# Deploy Ingress +echo "Deploying Ingress for router..." +cat </dev/null || echo "") + if [ -n "$INGRESS_IP" ]; then + break + fi + echo " Waiting for ingress IP... ($i/30)" + sleep 2 +done + +if [ -z "$INGRESS_IP" ]; then + echo "" + echo "Error: Ingress did not get an IP address after waiting" + echo "This may indicate an issue with the ingress controller" + echo "" + echo "Troubleshooting:" + echo " 1. Check ingress controller status: kubectl get pods -n ingress-nginx" + echo " 2. Check ingress status: kubectl describe ingress router -n apollo" + echo " 3. Try restarting ingress: minikube addons disable ingress && minikube addons enable ingress" + exit 1 +fi + +echo "" +echo "✓ Router is accessible at:" +echo " http://${INGRESS_IP}" +echo "" +echo "You can access the router at the IP above. 
If you want to use a hostname instead," +echo "you can add this to your /etc/hosts file:" +echo " ${INGRESS_IP} router.local" +echo "" +echo "Then access at: http://router.local" + diff --git a/scripts/minikube/08-deploy-client.sh b/scripts/minikube/08-deploy-client.sh new file mode 100755 index 0000000..5e1c1f5 --- /dev/null +++ b/scripts/minikube/08-deploy-client.sh @@ -0,0 +1,134 @@ +#!/bin/bash +set -euo pipefail + +# Script 08: Deploy Client +# This script deploys the client application (optional) + +echo "=== Step 08: Deploying Client Application ===" + +# Load environment variables from .env if it exists +if [ -f .env ]; then + echo "Loading environment variables from .env..." + source .env +fi + +# Validate required variables +if [[ -z "${ENVIRONMENT:-}" ]]; then + echo "Error: ENVIRONMENT is required" + echo "Please set ENVIRONMENT in your .env file or export it:" + echo " export ENVIRONMENT=\"dev\"" + exit 1 +fi + +# Check if kubectl is available +if ! command -v kubectl &> /dev/null; then + echo "Error: kubectl is not installed" + exit 1 +fi + +# Check if helm is available +if ! command -v helm &> /dev/null; then + echo "Error: helm is not installed" + exit 1 +fi + +# Verify cluster connection +if ! kubectl cluster-info &> /dev/null; then + echo "Error: Cannot connect to Kubernetes cluster" + exit 1 +fi + +# Get router URL for backend configuration +RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" +INGRESS_IP=$(kubectl get ingress router -n apollo -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "") + +if [ -z "$INGRESS_IP" ]; then + echo "Error: Router ingress not found or has no IP address" + echo "Please ensure script 07-deploy-ingress.sh completed successfully" + exit 1 +fi + +BACKEND_URL="http://${INGRESS_IP}" +echo "Using backend URL: $BACKEND_URL" + +# Create client namespace +kubectl create namespace client --dry-run=client -o yaml | kubectl apply -f - + +# Check if client directory exists +if [ ! -d "client" ]; then + echo "Warning: client directory not found, skipping client deployment" + exit 0 +fi + +# Build client with BACKEND_URL if Dockerfile supports it +if [ -f "client/Dockerfile" ] && grep -q "BACKEND_URL" "client/Dockerfile"; then + echo "Building client image with BACKEND_URL=$BACKEND_URL..." + eval $(minikube docker-env) + docker build --build-arg BACKEND_URL="$BACKEND_URL" -t client:local client +fi + +# Install using Helm +echo "Deploying client..." +helm upgrade --install client "deploy/client" \ + -n client \ + --wait + +# Deploy ingress for client +echo "Deploying ingress for client..." +cat </dev/null || echo "") + if [ -n "$CLIENT_IP" ]; then + break + fi + echo " Waiting for ingress IP... ($i/30)" + sleep 2 +done + +if [ -z "$CLIENT_IP" ]; then + echo "" + echo "Error: Client ingress did not get an IP address after waiting" + echo "This may indicate an issue with the ingress controller" + echo "" + echo "Troubleshooting:" + echo " 1. Check ingress controller status: kubectl get pods -n ingress-nginx" + echo " 2. Check ingress status: kubectl describe ingress client -n client" + exit 1 +fi + +echo "" +echo "✓ Client is accessible at:" +echo " http://${CLIENT_IP}" +echo "" +echo "You can access the client at the IP above. If you want to use a hostname instead," +echo "you can add this to your /etc/hosts file:" +echo " ${CLIENT_IP} client.local" +echo "" +echo "Then access at: http://client.local" +echo "" +echo "✓ Client deployment complete!" 
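+# Optional smoke test once the script finishes, e.g.:
+#   kubectl get pods -n client
+#   curl -I "http://${CLIENT_IP}"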
+ diff --git a/subgraphs/checkout/deploy/values.yaml b/subgraphs/checkout/deploy/values.yaml index c068f7a..125164c 100644 --- a/subgraphs/checkout/deploy/values.yaml +++ b/subgraphs/checkout/deploy/values.yaml @@ -1,9 +1,9 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/implemented-reference-architecture/checkout - pullPolicy: Always - tag: main + repository: checkout + pullPolicy: Never + tag: local nameOverride: graphql fullnameOverride: graphql @@ -11,11 +11,6 @@ fullnameOverride: graphql serviceAccount: create: false -# Image pull secrets for GitHub Container Registry -# Set this if your images are private and require authentication -imagePullSecrets: - - name: ghcr-secret - podAnnotations: {} securityContext: {} diff --git a/subgraphs/discovery/deploy/values.yaml b/subgraphs/discovery/deploy/values.yaml index 672e1c2..d2ac4bf 100644 --- a/subgraphs/discovery/deploy/values.yaml +++ b/subgraphs/discovery/deploy/values.yaml @@ -1,9 +1,9 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/implemented-reference-architecture/discovery - pullPolicy: Always - tag: main + repository: discovery + pullPolicy: Never + tag: local nameOverride: graphql fullnameOverride: graphql @@ -11,11 +11,6 @@ fullnameOverride: graphql serviceAccount: create: false -# Image pull secrets for GitHub Container Registry -# Set this if your images are private and require authentication -imagePullSecrets: - - name: ghcr-secret - podAnnotations: {} securityContext: {} diff --git a/subgraphs/inventory/deploy/values.yaml b/subgraphs/inventory/deploy/values.yaml index 1fb8fd4..f9680ce 100644 --- a/subgraphs/inventory/deploy/values.yaml +++ b/subgraphs/inventory/deploy/values.yaml @@ -1,9 +1,9 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/implemented-reference-architecture/inventory - pullPolicy: Always - tag: main + repository: inventory + pullPolicy: Never + tag: local nameOverride: graphql fullnameOverride: graphql @@ -11,11 +11,6 @@ fullnameOverride: graphql serviceAccount: create: false -# Image pull secrets for GitHub Container Registry -# Set this if your images are private and require authentication -imagePullSecrets: - - name: ghcr-secret - podAnnotations: {} securityContext: {} diff --git a/subgraphs/orders/deploy/values.yaml b/subgraphs/orders/deploy/values.yaml index 9db7dba..580f161 100644 --- a/subgraphs/orders/deploy/values.yaml +++ b/subgraphs/orders/deploy/values.yaml @@ -1,9 +1,9 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/implemented-reference-architecture/orders - pullPolicy: Always - tag: main + repository: orders + pullPolicy: Never + tag: local nameOverride: graphql fullnameOverride: graphql @@ -11,11 +11,6 @@ fullnameOverride: graphql serviceAccount: create: false -# Image pull secrets for GitHub Container Registry -# Set this if your images are private and require authentication -imagePullSecrets: - - name: ghcr-secret - podAnnotations: {} securityContext: {} diff --git a/subgraphs/products/deploy/values.yaml b/subgraphs/products/deploy/values.yaml index ad7902e..f107d9d 100644 --- a/subgraphs/products/deploy/values.yaml +++ b/subgraphs/products/deploy/values.yaml @@ -1,9 +1,9 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/implemented-reference-architecture/products - pullPolicy: Always - tag: main + repository: products + pullPolicy: Never + tag: local nameOverride: graphql fullnameOverride: graphql @@ -11,11 +11,6 @@ fullnameOverride: graphql serviceAccount: create: false -# Image pull secrets for GitHub Container Registry -# Set 
this if your images are private and require authentication -imagePullSecrets: - - name: ghcr-secret - podAnnotations: {} securityContext: {} diff --git a/subgraphs/reviews/deploy/values.yaml b/subgraphs/reviews/deploy/values.yaml index 0827dc8..a1701b4 100644 --- a/subgraphs/reviews/deploy/values.yaml +++ b/subgraphs/reviews/deploy/values.yaml @@ -1,9 +1,9 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/implemented-reference-architecture/reviews - pullPolicy: Always - tag: main + repository: reviews + pullPolicy: Never + tag: local nameOverride: graphql fullnameOverride: graphql @@ -11,11 +11,6 @@ fullnameOverride: graphql serviceAccount: create: false -# Image pull secrets for GitHub Container Registry -# Set this if your images are private and require authentication -imagePullSecrets: - - name: ghcr-secret - podAnnotations: {} securityContext: {} diff --git a/subgraphs/shipping/deploy/values.yaml b/subgraphs/shipping/deploy/values.yaml index ff4c6a8..2d94127 100644 --- a/subgraphs/shipping/deploy/values.yaml +++ b/subgraphs/shipping/deploy/values.yaml @@ -1,9 +1,9 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/implemented-reference-architecture/shipping - pullPolicy: Always - tag: main + repository: shipping + pullPolicy: Never + tag: local nameOverride: graphql fullnameOverride: graphql @@ -11,11 +11,6 @@ fullnameOverride: graphql serviceAccount: create: false -# Image pull secrets for GitHub Container Registry -# Set this if your images are private and require authentication -imagePullSecrets: - - name: ghcr-secret - podAnnotations: {} securityContext: {} diff --git a/subgraphs/users/deploy/values.yaml b/subgraphs/users/deploy/values.yaml index 1cfca0e..61f2604 100644 --- a/subgraphs/users/deploy/values.yaml +++ b/subgraphs/users/deploy/values.yaml @@ -1,9 +1,9 @@ replicaCount: 3 image: - repository: ghcr.io/andywgarcia/implemented-reference-architecture/users - pullPolicy: Always - tag: main + repository: users + pullPolicy: Never + tag: local nameOverride: graphql fullnameOverride: graphql @@ -11,11 +11,6 @@ fullnameOverride: graphql serviceAccount: create: false -# Image pull secrets for GitHub Container Registry -# Set this if your images are private and require authentication -imagePullSecrets: - - name: ghcr-secret - podAnnotations: {} securityContext: {} diff --git a/terraform/aws/.env.sample b/terraform/aws/.env.sample deleted file mode 100644 index 0376c22..0000000 --- a/terraform/aws/.env.sample +++ /dev/null @@ -1,6 +0,0 @@ -export APOLLO_KEY="" -export GITHUB_ORG="" -export TF_VAR_github_token="" - -# Don't change these lines -export TF_VAR_project_id=$PROJECT_ID diff --git a/terraform/aws/.terraform.lock.hcl b/terraform/aws/.terraform.lock.hcl deleted file mode 100644 index 37a0fb5..0000000 --- a/terraform/aws/.terraform.lock.hcl +++ /dev/null @@ -1,145 +0,0 @@ -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. 
- -provider "registry.terraform.io/hashicorp/aws" { - version = "5.64.0" - constraints = ">= 4.33.0, >= 5.46.0, >= 5.61.0" - hashes = [ - "h1:YH4I78rsS9t+YoGMPNzrM53aWi0Rb9Nud16iusrSXMg=", - "zh:1d361f8062c68c9d5ac14b0aa8390709542129b8a9b258e61bbbabc706078b44", - "zh:39dcbf53e3896bdd77071384c8fad4a5862c222c73f3bcf356aca488101f22fd", - "zh:3fad63505f0c5b6f01cc9a6ef02b2226983b79424126a9caf6eb724f654299f4", - "zh:53a8b90d00829cc27e3171a13a8ff1404ee0ea018e73f31d3f916d246cc39613", - "zh:5734c25ef5a04b40f3c1ac5f817f11e42ee3328f74dbc141c0e64afbb0acc834", - "zh:66ea14dbd87f291ce4a877123363933d3ca4022f209f885807a6689c22c24e80", - "zh:68e79654ad0894a3d93134c3377748ace3058d5fad5ec09d1e9a8f8f9b8a47ea", - "zh:7b74259d0ceef0c49cea6bcd171df997b6bad141085bbadded15b440faeb0eee", - "zh:988ebfb5d115dc57070b5abf2e4200ad49cde535f27fd2ba5e34cf9ab336a57f", - "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", - "zh:a0a2d4efe2835f0101a0a5024e044a3f28c00e10a8d87fce89c707ef6db75cea", - "zh:aecb3e4b9121771dee9cac7975bf5d0657b5f3e8b57788c455beaeb0f3c48d93", - "zh:d2d3393170b8ef761d3146f39f6788c4a3e876e6c5d4cedca4870c2680688ae6", - "zh:daba5a005c1baa4a5eefbfb86d43ccf880eb5b42e8136f0d932f55886d72bda0", - "zh:de16a6ff3baacdaf9609a0a89aa1913fc19cccaf5ee0fc1c49c5a075baa47c02", - ] -} - -provider "registry.terraform.io/hashicorp/cloudinit" { - version = "2.3.4" - constraints = ">= 2.0.0" - hashes = [ - "h1:S3j8poSaLbaftlKq2STBkQEkZH253ZLaHhBHBifdpBQ=", - "zh:09f1f1e1d232da96fbf9513b0fb5263bc2fe9bee85697aa15d40bb93835efbeb", - "zh:381e74b90d7a038c3a8dcdcc2ce8c72d6b86da9f208a27f4b98cabe1a1032773", - "zh:398eb321949e28c4c5f7c52e9b1f922a10d0b2b073b7db04cb69318d24ffc5a9", - "zh:4a425679614a8f0fe440845828794e609b35af17db59134c4f9e56d61e979813", - "zh:4d955d8608ece4984c9f1dacda2a59fdb4ea6b0243872f049b388181aab8c80a", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:a48fbee1d58d55a1f4c92c2f38c83a37c8b2f2701ed1a3c926cefb0801fa446a", - "zh:b748fe6631b16a1dafd35a09377c3bffa89552af584cf95f47568b6cd31fc241", - "zh:d4b931f7a54603fa4692a2ec6e498b95464babd2be072bed5c7c2e140a280d99", - "zh:f1c9337fcfe3a7be39d179eb7986c22a979cfb2c587c05f1b3b83064f41785c5", - "zh:f58fc57edd1ee3250a28943cd84de3e4b744cdb52df0356a53403fc240240636", - "zh:f5f50de0923ff530b03e1bca0ac697534d61bb3e5fc7f60e13becb62229097a9", - ] -} - -provider "registry.terraform.io/hashicorp/kubernetes" { - version = "2.32.0" - hashes = [ - "h1:3j4XBR5UWQA7xXaiEnzZp0bHbcwOhWetHYKTWIrUTI0=", - "zh:0e715d7fb13a8ad569a5fdc937b488590633f6942e986196fdb17cd7b8f7720e", - "zh:495fc23acfe508ed981e60af9a3758218b0967993065e10a297fdbc210874974", - "zh:4b930a8619910ef528bc90dae739cb4236b9b76ce41367281e3bc3cf586101c7", - "zh:5344405fde7b1febf0734052052268ee24e7220818155702907d9ece1c0697c7", - "zh:92ee11e8c23bbac3536df7b124456407f35c6c2468bc0dbab15c3fc9f414bd0e", - "zh:a45488fe8d5bb59c49380f398da5d109a4ac02ebc10824567dabb87f6102fda8", - "zh:a4a0b57cf719a4c91f642436882b7bea24d659c08a5b6f4214ce4fe6a0204caa", - "zh:b7a27a6d11ba956a2d7b0f7389a46ec857ebe46ae3aeee537250e66cac15bf03", - "zh:bf94ce389028b686bfa70a90f536e81bb776c5c20ab70138bbe5c3d0a04c4253", - "zh:d965b2608da0212e26a65a0b3f33c5baae46cbe839196be15d93f70061516908", - "zh:f441fc793d03057a17af8bdca8b26d54916645bc5c148f54e22a54ed39089e83", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - ] -} - -provider "registry.terraform.io/hashicorp/local" { - version = "2.5.1" - hashes = [ - "h1:/GAVA/xheGQcbOZEq0qxANOg+KVLCA7Wv8qluxhTjhU=", - 
"zh:0af29ce2b7b5712319bf6424cb58d13b852bf9a777011a545fac99c7fdcdf561", - "zh:126063ea0d79dad1f68fa4e4d556793c0108ce278034f101d1dbbb2463924561", - "zh:196bfb49086f22fd4db46033e01655b0e5e036a5582d250412cc690fa7995de5", - "zh:37c92ec084d059d37d6cffdb683ccf68e3a5f8d2eb69dd73c8e43ad003ef8d24", - "zh:4269f01a98513651ad66763c16b268f4c2da76cc892ccfd54b401fff6cc11667", - "zh:51904350b9c728f963eef0c28f1d43e73d010333133eb7f30999a8fb6a0cc3d8", - "zh:73a66611359b83d0c3fcba2984610273f7954002febb8a57242bbb86d967b635", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:7ae387993a92bcc379063229b3cce8af7eaf082dd9306598fcd42352994d2de0", - "zh:9e0f365f807b088646db6e4a8d4b188129d9ebdbcf2568c8ab33bddd1b82c867", - "zh:b5263acbd8ae51c9cbffa79743fbcadcb7908057c87eb22fd9048268056efbc4", - "zh:dfcd88ac5f13c0d04e24be00b686d069b4879cc4add1b7b1a8ae545783d97520", - ] -} - -provider "registry.terraform.io/hashicorp/time" { - version = "0.12.0" - constraints = ">= 0.9.0" - hashes = [ - "h1:Os2Ok7txtlUJHh6Hg7o+74Ql85SnRb/fGmah22yXpLw=", - "zh:019a4c09af254ef80b72cf0d843dfe72d99483e227138cf5b514a1b9977ab4c3", - "zh:0ae310ec740ebc6f275529507d60bb747d0bf39e72fc5a2fa90d74486006132c", - "zh:13d6aec117f05237fbf8c7d91d6ebb19797b00aa87e7a812642d3ea4738a394e", - "zh:2e87abbc261f9317d0c2ef26e01d5fabf77679da7d2cac6f47df7d198f720989", - "zh:4a6d471176ce0264455aa7d5457b8702f78400010c201c1719708958a1b7b647", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:8978d5474a6da30bc0ad21c17db188d6918cacf3df3f6506b72ef3a268d53e2e", - "zh:b109efe138dfcb45dc04a9cc6809d185ab8b0ebc12040847c2dac430fda5af68", - "zh:b58e039b9106ac0a8de3c07f53b5279d7f0215fb35f2d23df642dfce0875382f", - "zh:ba2cbb2e515922d13efe3a46647be84f5426fcfcaa0f1520b3efeab8db847ed3", - "zh:c6c1ef1f26f25bca3abb5e07fa33dca37ed39cc26d0ff877964f2ffe5edd618c", - "zh:f8e171f923b7d2e789abd034072465dec3e6133c3a7644b7a7a965a74d52224e", - ] -} - -provider "registry.terraform.io/hashicorp/tls" { - version = "4.0.5" - constraints = ">= 3.0.0" - hashes = [ - "h1:zeG5RmggBZW/8JWIVrdaeSJa0OG62uFX5HY1eE8SjzY=", - "zh:01cfb11cb74654c003f6d4e32bbef8f5969ee2856394a96d127da4949c65153e", - "zh:0472ea1574026aa1e8ca82bb6df2c40cd0478e9336b7a8a64e652119a2fa4f32", - "zh:1a8ddba2b1550c5d02003ea5d6cdda2eef6870ece86c5619f33edd699c9dc14b", - "zh:1e3bb505c000adb12cdf60af5b08f0ed68bc3955b0d4d4a126db5ca4d429eb4a", - "zh:6636401b2463c25e03e68a6b786acf91a311c78444b1dc4f97c539f9f78de22a", - "zh:76858f9d8b460e7b2a338c477671d07286b0d287fd2d2e3214030ae8f61dd56e", - "zh:a13b69fb43cb8746793b3069c4d897bb18f454290b496f19d03c3387d1c9a2dc", - "zh:a90ca81bb9bb509063b736842250ecff0f886a91baae8de65c8430168001dad9", - "zh:c4de401395936e41234f1956ebadbd2ed9f414e6908f27d578614aaa529870d4", - "zh:c657e121af8fde19964482997f0de2d5173217274f6997e16389e7707ed8ece8", - "zh:d68b07a67fbd604c38ec9733069fbf23441436fecf554de6c75c032f82e1ef19", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - ] -} - -provider "registry.terraform.io/integrations/github" { - version = "6.2.3" - hashes = [ - "h1:qBH3fN/NItflQkBoIVdQa7n8WvBOuu7Ao20oeoAubKM=", - "zh:05874671652a260b12d784cc46b0eea156f493a5f12e00368d1f6cb319156257", - "zh:0c7a3cae5a66e5c5efc3b25ba646a0d46bfe1fd3edba1f5a75f51aede85a9d1b", - "zh:174310010d08f13e36e53ff18e44a21dd040c89884ef190a192c6ce27926a912", - "zh:23d1d8731e518354ce6a83419f49101aece63882b0ca7c489f3c598cc6ea5d5e", - "zh:4e88953816daf11ab1681c32c7988d4e29476fc44f0959fe03173532cf5044de", - 
"zh:6fab07734ccf27f5afee4442abae2d33245eabf35519032ce1e2aad6961a640a", - "zh:7b2f324b918e161c892c29ee80d36c48ca8b891b8047e132fc701ca741e5ae72", - "zh:8ef4f0d691ade98082ef1f6b36e556468e5ab26e60021f0de0fb22e3acdfd990", - "zh:8f0f3e139faa8f2b9075bb9978dd683f4bab5ac91171bbb969addd04d7f0b90f", - "zh:97cb6d7fdf640237cc2f0ab830db8f878770968c59fd28298e9dddb8b9e6294d", - "zh:a17038d8747c6bb660e4c5981e8ffbbc33c66ba164868fd35d442e7f828a1e01", - "zh:aa9f4b7d947f7b11277b4e9ba7147f5594cf60a6589b7aac4344f73d1400d1c0", - "zh:c780b951e14d583ef6ffef9a934831b56ee157c50ed8e969c676a636810f7db1", - "zh:d8497bb2986fd76107b7208b33cc39281797164fdea09453e987b969a461befb", - "zh:fbd1fee2c9df3aa19cf8851ce134dea6e45ea01cb85695c1726670c285797e25", - ] -} diff --git a/terraform/aws/create_graph.sh b/terraform/aws/create_graph.sh deleted file mode 100755 index 158b0de..0000000 --- a/terraform/aws/create_graph.sh +++ /dev/null @@ -1,246 +0,0 @@ -#/bin/bash -set -euo pipefail - -APOLLO_KEY=${APOLLO_KEY:-""} -CLUSTER_PREFIX=${CLUSTER_PREFIX:-"apollo-supergraph-k8s"} -ACCOUNT_ID=${ACCOUNT_ID:-""} -GRAPH_ID=$CLUSTER_PREFIX-$(echo $RANDOM | shasum | head -c 6) -HEADER=${HEADER:-""} -VARIANTS=("dev" "prod") - -if [[ "$APOLLO_KEY" == "" ]]; then - echo "Must provide APOLLO_KEY in environment" 1>&2 - exit 1 -fi - -if [[ $(which jq) == "" ]]; then - echo "please install jq before continuing: https://stedolan.github.io/jq/" - exit 1 -fi - -if [[ $(which rover) == "" ]]; then - echo "rover not installed; see: https://www.apollographql.com/docs/rover/getting-started/" - exit 1 -fi - -# if an account id is not provided, fetch it from Studio -if [[ $ACCOUNT_ID == "" ]]; then - ACCOUNT_ARGS=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --url 'https://graphql.api.apollographql.com/api/graphql' - --data '{"query":"{ me { ... on User { memberships { permission account { id } } } } }"}' - ) - - if [[ $HEADER != "" ]]; then - ACCOUNT_ARGS+=(--header "$HEADER") - fi - - ACCOUNT_RESP=$(curl "${ACCOUNT_ARGS[@]}") - ACCOUNT_COUNT=$(echo $ACCOUNT_RESP | jq -r ".data.me.memberships | length") - - # if more than one account exists, exit early - if [[ $ACCOUNT_COUNT > 1 ]]; then - echo "Apollo Studio returned more than one account." - echo "Specify an account ID with ACCOUNT_ID=myaccount $0" - echo "Accounts: " - echo $(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[].account.id") - exit 1 - fi - - ACCOUNT_ID=$(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[0].account.id") -fi - -echo "Creating graph $GRAPH_ID on account $ACCOUNT_ID..." 
- -CREATE_ARGS=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --url 'https://graphql.api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreateGraph(\$accountId: ID!, \$newServiceId: ID!, \$name: String, \$onboardingArchitecture: OnboardingArchitecture) { newService(accountId: \$accountId, id: \$newServiceId, name: \$name, onboardingArchitecture: \$onboardingArchitecture) { id apiKeys { token } } }\",\"variables\":{\"accountId\":\"$ACCOUNT_ID\",\"newServiceId\":\"$GRAPH_ID\",\"name\":\"Build a Supergraph $(date +"%Y-%m-%d")\",\"onboardingArchitecture\":\"SUPERGRAPH\"}}" -) - -if [[ $HEADER != "" ]]; then - CREATE_ARGS+=(--header "$HEADER") -fi - -CREATE_RESP=$(curl "${CREATE_ARGS[@]}") - -IS_SUCCESS=$(echo $CREATE_RESP | jq -r ".data.newService") -if [[ "$IS_SUCCESS" == "null" ]]; then - echo "Error creating graph" - echo $CREATE_RESP | jq . - exit 1 -fi - -GRAPH_KEY=$(echo $CREATE_RESP | jq -r ".data.newService.apiKeys[0].token") - -# Create Operator API key for the operator to use -echo "Creating Operator API key..." - -CREATE_OPERATOR_KEY_ARGS=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header "apollographql-client-name: reference-architecture" - --header "apollographql-client-version: 1.0" - --header 'content-type: application/json' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreateOperatorKey(\$name: String!, \$type: GraphOsKeyType!, \$organizationId: ID!) { organization(id: \$organizationId) { createKey(name: \$name, type: \$type) { id keyName expiresAt token } } }\",\"variables\":{\"name\":\"operator\",\"type\":\"OPERATOR\",\"organizationId\":\"$ACCOUNT_ID\"}}" -) - -CREATE_OPERATOR_KEY_RESP=$(curl "${CREATE_OPERATOR_KEY_ARGS[@]}") - -OPERATOR_KEY=$(echo $CREATE_OPERATOR_KEY_RESP | jq -r ".data.organization.createKey.token") -if [[ "$OPERATOR_KEY" == "null" ]]; then - echo "Error creating operator key" - echo $CREATE_OPERATOR_KEY_RESP | jq . - exit 1 -fi - -echo "Operator key created successfully" - -# Note: Subgraph schema publishing is now handled by the Apollo GraphOS Operator -# when Subgraph CRDs are deployed. No manual rover publish commands needed. -# We create variants by publishing dummy subgraphs to them. - -echo "Creating dev and prod variants by publishing dummy subgraphs..." 
- -for variant in "${VARIANTS[@]}"; do - echo "Creating variant: $variant" - - PUBLISH_ARGS=( - --silent - --header "x-api-key: $GRAPH_KEY" - --header "apollographql-client-name: reference-architecture" - --header "apollographql-client-version: 1.0" - --header 'content-type: application/json' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation PublishSubgraph(\$graphId: ID!, \$graphVariant: String!, \$name: String!, \$revision: String!, \$activePartialSchema: PartialSchemaInput!, \$url: String) { graph(id: \$graphId) { publishSubgraph(graphVariant: \$graphVariant, name: \$name, revision: \$revision, activePartialSchema: \$activePartialSchema, url: \$url) { subgraphsCreated errors { message locations { column line } code } wasCreated wasUpdated } } }\",\"variables\":{\"graphId\":\"$GRAPH_ID\",\"graphVariant\":\"$variant\",\"name\":\"temp-subgraph\",\"revision\":\"1\",\"activePartialSchema\":{\"sdl\":\"type Query { temp: String }\"},\"url\":\"http://localhost:1234\"}}" - ) - - PUBLISH_RESP=$(curl "${PUBLISH_ARGS[@]}") - - if [[ $(echo $PUBLISH_RESP | jq -r ".data.graph.publishSubgraph.errors | length") > 0 ]]; then - echo "Error creating variant $variant" - echo $PUBLISH_RESP | jq . - exit 1 - fi - - echo "Created variant: $variant" -done - -# Create persisted query lists for dev and prod -# dev -CREATE_PQ_ARGS_DEV=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" -) - -if [[ $HEADER != "" ]]; then - CREATE_PQ_ARGS_DEV+=(--header "$HEADER") -fi - -CREATE_PQ_DEV_RESP=$(curl "${CREATE_PQ_ARGS_DEV[@]}") - -IS_SUCCESS=$(echo $CREATE_PQ_DEV_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList") -if [[ "$IS_SUCCESS" == "null" ]]; then - echo "Error creating pq list for dev" - echo $CREATE_PQ_DEV_RESP | jq . - exit 1 -fi - -DEV_PQ_ID=$(echo $CREATE_PQ_DEV_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList.id") - -UPDATE_DEV_PQ_LIST_ARGS=( - --silent - --request POST - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... 
on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$DEV_PQ_ID\",\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" -) - -if [[ $HEADER != "" ]]; then - UPDATE_DEV_PQ_LIST_ARGS+=(--header "$HEADER") -fi - -UPDATE_DEV_PQ_LIST_RESP=$(curl "${UPDATE_DEV_PQ_LIST_ARGS[@]}") - -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_DEV_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for dev" - echo $UPDATE_DEV_PQ_LIST_RESP | jq . - exit 1 -fi - -# prod -CREATE_PQ_ARGS_PROD=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" -) - -if [[ $HEADER != "" ]]; then - CREATE_PQ_ARGS_DEV+=(--header "$HEADER") -fi - -CREATE_PQ_PROD_RESP=$(curl "${CREATE_PQ_ARGS_PROD[@]}") -IS_SUCCESS=$(echo $CREATE_PQ_PROD_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList") -if [[ "$IS_SUCCESS" == "null" ]]; then - echo "Error creating pq list for prod" - echo $CREATE_PQ_PROD_RESP | jq . - exit 1 -fi - -PROD_PQ_ID=$(echo $CREATE_PQ_PROD_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList.id") - -UPDATE_PROD_PQ_LIST_ARGS=( - --silent - --request POST - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$PROD_PQ_ID\",\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" -) -if [[ $HEADER != "" ]]; then - UPDATE_PROD_PQ_LIST_ARGS+=(--header "$HEADER") -fi - -UPDATE_PROD_PQ_LIST_RESP=$(curl "${UPDATE_PROD_PQ_LIST_ARGS[@]}") - -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_PROD_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for prod" - echo $UPDATE_PROD_PQ_LIST_RESP | jq . - exit 1 -fi - -echo '' -echo "Adding Apollo credentials as Terraform variables in .env..." -echo '' >> .env -echo "export TF_VAR_apollo_key=\"$GRAPH_KEY\"" >> .env -echo "export TF_VAR_apollo_graph_id=\"$GRAPH_ID\"" >> .env -echo "export TF_VAR_pq_dev_id=\"$DEV_PQ_ID\"" >> .env -echo "export TF_VAR_pq_prod_id=\"$PROD_PQ_ID\"" >> .env -echo "export OPERATOR_KEY=\"$OPERATOR_KEY\"" >> .env -echo '' >> .env -echo 'Re-run `source .env` to load them.' 
diff --git a/terraform/aws/eks_dev.tf b/terraform/aws/eks_dev.tf deleted file mode 100644 index 942b5d9..0000000 --- a/terraform/aws/eks_dev.tf +++ /dev/null @@ -1,69 +0,0 @@ -provider "kubernetes" { - alias = "dev" - host = module.eks_dev.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_dev.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks_dev.cluster_name] - } -} - -module "eks_dev" { - source = "terraform-aws-modules/eks/aws" - version = "~> 19.21" - - providers = { - kubernetes = kubernetes.dev - } - cluster_name = "${var.demo_name}-dev" - cluster_endpoint_public_access = true - - cluster_addons = { - coredns = { - most_recent = true - } - kube-proxy = { - most_recent = true - } - vpc-cni = { - most_recent = true - } - } - - vpc_id = module.vpc["dev"].vpc_id - subnet_ids = module.vpc["dev"].private_subnets - control_plane_subnet_ids = module.vpc["dev"].intra_subnets - - manage_aws_auth_configmap = true - - eks_managed_node_group_defaults = { - ami_type = "AL2_x86_64" - instance_types = ["${var.demo_stages["dev"].node_type}"] - } - - eks_managed_node_groups = { - primary = { - disk_size = 20 - min_size = var.demo_stages["dev"].min_nodes - max_size = var.demo_stages["dev"].max_nodes - desired_size = var.demo_stages["dev"].min_nodes - - instance_types = ["${var.demo_stages["dev"].node_type}"] - } - } - - create_iam_role = true - iam_role_name = "${substr(var.demo_name, 0, 12)}-dev-eks-deploy" - iam_role_use_name_prefix = false - - aws_auth_users = [ - { - userarn = aws_iam_user.eks_user.arn - username = aws_iam_user.eks_user.name - groups = ["system:masters"] - } - ] -} diff --git a/terraform/aws/eks_prod.tf b/terraform/aws/eks_prod.tf deleted file mode 100644 index ba49e50..0000000 --- a/terraform/aws/eks_prod.tf +++ /dev/null @@ -1,68 +0,0 @@ -provider "kubernetes" { - alias = "prod" - host = module.eks_prod.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_prod.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks_prod.cluster_name] - } -} - -module "eks_prod" { - source = "terraform-aws-modules/eks/aws" - version = "~> 19.21" - - providers = { - kubernetes = kubernetes.prod - } - cluster_name = "${var.demo_name}-prod" - cluster_endpoint_public_access = true - - cluster_addons = { - coredns = { - most_recent = true - } - kube-proxy = { - most_recent = true - } - vpc-cni = { - most_recent = true - } - } - - vpc_id = module.vpc["prod"].vpc_id - subnet_ids = module.vpc["prod"].private_subnets - control_plane_subnet_ids = module.vpc["prod"].intra_subnets - - manage_aws_auth_configmap = true - - eks_managed_node_group_defaults = { - ami_type = "AL2_x86_64" - instance_types = ["${var.demo_stages["prod"].node_type}"] - } - - eks_managed_node_groups = { - primary = { - disk_size = 20 - min_size = var.demo_stages["prod"].min_nodes - max_size = var.demo_stages["prod"].max_nodes - desired_size = var.demo_stages["prod"].min_nodes - - instance_types = ["${var.demo_stages["prod"].node_type}"] - } - } - - create_iam_role = true - iam_role_name = "${substr(var.demo_name, 0, 12)}-prod-eks-deploy" - iam_role_use_name_prefix = false - aws_auth_users = [ - { - 
userarn = aws_iam_user.eks_user.arn - username = aws_iam_user.eks_user.name - groups = ["system:masters"] - } - ] -} diff --git a/terraform/aws/github.tf b/terraform/aws/github.tf deleted file mode 100644 index 968b2c0..0000000 --- a/terraform/aws/github.tf +++ /dev/null @@ -1,57 +0,0 @@ -provider "github" { - token = var.github_token -} - -# Repository -resource "github_repository" "repo" { - name = "reference-architecture" - description = "Apollo supergraph reference architecture repository" - visibility = "public" - depends_on = [ - module.eks_dev, - module.eks_prod, - ] - template { - owner = "apollosolutions" - repository = "reference-architecture" - } -} - -### GH Action Secrets ### - -# repo secrets -resource "github_actions_secret" "apollo_graph_id" { - repository = github_repository.repo.name - secret_name = "APOLLO_GRAPH_ID" - plaintext_value = var.apollo_graph_id -} -resource "github_actions_secret" "apollo_key" { - repository = github_repository.repo.name - secret_name = "APOLLO_KEY" - plaintext_value = var.apollo_key -} -resource "github_actions_secret" "pq_dev_id" { - repository = github_repository.repo.name - secret_name = "APOLLO_PQ_DEV_ID" - plaintext_value = var.pq_dev_id -} -resource "github_actions_secret" "pq_prod_id" { - repository = github_repository.repo.name - secret_name = "APOLLO_PQ_PROD_ID" - plaintext_value = var.pq_prod_id -} -resource "github_actions_secret" "cluster_prefix" { - repository = github_repository.repo.name - secret_name = "CLUSTER_PREFIX" - plaintext_value = var.demo_name -} -resource "github_actions_secret" "aws_access_key" { - repository = github_repository.repo.name - secret_name = "AWS_ACCESS_KEY" - plaintext_value = aws_iam_access_key.eks_user_key.id -} -resource "github_actions_secret" "aws_secret_key" { - repository = github_repository.repo.name - secret_name = "AWS_SECRET_KEY" - plaintext_value = aws_iam_access_key.eks_user_key.secret -} diff --git a/terraform/aws/iam.tf b/terraform/aws/iam.tf deleted file mode 100644 index 68330e6..0000000 --- a/terraform/aws/iam.tf +++ /dev/null @@ -1,29 +0,0 @@ -resource "aws_iam_user" "eks_user" { - name = "${var.demo_name}-eks-user" -} - -resource "aws_iam_access_key" "eks_user_key" { - user = aws_iam_user.eks_user.name -} - -resource "aws_iam_user_policy" "eks_user_policy" { - name = "eks-user-describe-policy" - user = aws_iam_user.eks_user.name - - policy = jsonencode({ - Version = "2012-10-17" - Statement = [{ - Effect = "Allow" - Action = [ - "eks:DescribeCluster", - "eks:ListClusters", - ] - Resource = "*" - }] - }) -} - -resource "local_file" "eks_user_key_file" { - content = "AWS_ACCESS_KEY_ID=${aws_iam_access_key.eks_user_key.id}\nAWS_SECRET_ACCESS_KEY=${aws_iam_access_key.eks_user_key.secret}" - filename = "eks_user_creds.txt" -} diff --git a/terraform/aws/main.tf b/terraform/aws/main.tf deleted file mode 100644 index 3911c84..0000000 --- a/terraform/aws/main.tf +++ /dev/null @@ -1,14 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - github = { - source = "integrations/github" - } - } -} - -provider "aws" { - region = var.project_region -} diff --git a/terraform/aws/outputs.tf b/terraform/aws/outputs.tf deleted file mode 100644 index 179ecb3..0000000 --- a/terraform/aws/outputs.tf +++ /dev/null @@ -1,14 +0,0 @@ -output "kubernetes_dev" { - value = module.eks_dev.cluster_name - description = "Dev K8s Cluster" -} - -output "kubernetes_prod" { - value = module.eks_prod.cluster_name - description = "Prod K8s Cluster" -} - -output "repo" { - value = 
github_repository.repo.html_url - description = "Repository URL" -} diff --git a/terraform/aws/setup_clusters.sh b/terraform/aws/setup_clusters.sh deleted file mode 100755 index 8f563b9..0000000 --- a/terraform/aws/setup_clusters.sh +++ /dev/null @@ -1,123 +0,0 @@ -#/bin/bash -set -euxo pipefail - -# default vars -CLUSTER_PREFIX=${CLUSTER_PREFIX:-"apollo-supergraph-k8s"} -PROJECT_REGION=${PROJECT_REGION:-"us-east1"} -PROJECT_CLUSTERS=("${CLUSTER_PREFIX}-dev" "${CLUSTER_PREFIX}-prod") -# end default vars - -if [[ $(which gcloud) == "" ]]; then - echo "gcloud not installed" - exit 1 -fi - -if [[ $(which kubectl) == "" ]]; then - echo "kubectl not installed" - exit 1 -fi - -if [[ $(which kubectx) == "" ]]; then - echo "kubectx not installed" - exit 1 -fi - -if [[ -z "$PROJECT_ID" ]]; then - echo "Must provide PROJECT_ID in environment" 1>&2 - exit 1 -fi - -environment_setup(){ - echo "Configuring Kubeconfig for ${1}..." - gcloud container clusters get-credentials ${1} --zone ${PROJECT_REGION} --project ${PROJECT_ID} - - # short context aliases: supports `kubectx apollo-supergraph-k8s-dev` - kubectx ${1}=. - - # monitoring setup: namespace, service account, and binding - # the service account name matches the otel collector's service account in its helm chart - kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f - - kubectl create serviceaccount -n "monitoring" "metrics-writer" --dry-run=client -o yaml | kubectl apply -f - - kubectl annotate serviceaccount -n "monitoring" "metrics-writer" "iam.gke.io/gcp-service-account=${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" --overwrite - gcloud iam service-accounts add-iam-policy-binding \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${PROJECT_ID}.svc.id.goog[monitoring/metrics-writer]" \ - "${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" - - # Apollo GraphOS Operator setup - echo "Installing Apollo GraphOS Operator..." - kubectl create namespace apollo-operator --dry-run=client -o yaml | kubectl apply -f - - kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - - - # Create operator API key secret (requires OPERATOR_KEY to be set) - if [[ -n "$OPERATOR_KEY" ]]; then - kubectl create secret generic apollo-api-key \ - --from-literal="APOLLO_KEY=$OPERATOR_KEY" \ - -n apollo-operator \ - --dry-run=client -o yaml | kubectl apply -f - - echo "Operator API key secret created" - else - echo "Warning: OPERATOR_KEY not set. Operator secret not created." - fi - - # Create GitHub Container Registry image pull secret (optional, requires TF_VAR_github_token) - if [[ -n "$TF_VAR_github_token" && -n "$GITHUB_ORG" ]]; then - echo "Creating GitHub Container Registry image pull secret..." 
- # Create in default namespace - kubectl create secret docker-registry ghcr-secret \ - --docker-server=ghcr.io \ - --docker-username="$GITHUB_ORG" \ - --docker-password="$TF_VAR_github_token" \ - --namespace=default \ - --dry-run=client -o yaml | kubectl apply -f - - - # Create in apollo namespace - kubectl create secret docker-registry ghcr-secret \ - --docker-server=ghcr.io \ - --docker-username="$GITHUB_ORG" \ - --docker-password="$TF_VAR_github_token" \ - --namespace=apollo \ - --dry-run=client -o yaml | kubectl apply -f - - - # Create in apollo-operator namespace and patch service account - kubectl create secret docker-registry ghcr-secret \ - --docker-server=ghcr.io \ - --docker-username="$GITHUB_ORG" \ - --docker-password="$TF_VAR_github_token" \ - --namespace=apollo-operator \ - --dry-run=client -o yaml | kubectl apply -f - - - kubectl patch serviceaccount apollo-operator -n apollo-operator \ - -p '{"imagePullSecrets":[{"name":"ghcr-secret"}]}' || true - - echo "GitHub Container Registry image pull secret created" - else - echo "Warning: TF_VAR_github_token and/or GITHUB_ORG not set. Image pull secret not created." - echo " Subgraphs may fail to pull images if they are private. Set these variables to enable image pull authentication." - fi - - # Install operator using Helm - if [[ $(which helm) != "" ]]; then - helm upgrade --install --atomic apollo-operator \ - oci://registry-1.docker.io/apollograph/operator-chart \ - -n apollo-operator \ - --create-namespace \ - -f - < stage - } - name = "${var.demo_name}-${each.value.name}-vpc" - cidr = each.value.cidr - - private_subnets = each.value.private_subnets - public_subnets = each.value.public_subnets - azs = [data.aws_availability_zones.available.names[0], data.aws_availability_zones.available.names[1]] - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - enable_flow_log = true - create_flow_log_cloudwatch_iam_role = true - create_flow_log_cloudwatch_log_group = true - - public_subnet_tags = { - "kubernetes.io/role/elb" = 1 - } - - private_subnet_tags = { - "kubernetes.io/role/internal-elb" = 1 - } - - tags = { - Terraform = "true" - Environment = each.key - } -} -data "aws_availability_zones" "available" { - state = "available" -} diff --git a/terraform/gcp/.env.sample b/terraform/gcp/.env.sample deleted file mode 100644 index fa0d9d7..0000000 --- a/terraform/gcp/.env.sample +++ /dev/null @@ -1,10 +0,0 @@ -# see https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke -export USE_GKE_GCLOUD_AUTH_PLUGIN=True - -export PROJECT_ID="" -export APOLLO_KEY="" -export GITHUB_ORG="" -export TF_VAR_github_token="" - -# Don't change these lines -export TF_VAR_project_id=$PROJECT_ID diff --git a/terraform/gcp/.terraform.lock.hcl b/terraform/gcp/.terraform.lock.hcl deleted file mode 100644 index 21255c0..0000000 --- a/terraform/gcp/.terraform.lock.hcl +++ /dev/null @@ -1,123 +0,0 @@ -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. 
- -provider "registry.terraform.io/hashicorp/google" { - version = "7.7.0" - constraints = ">= 3.33.0, >= 3.83.0, >= 4.25.0, >= 4.47.0, != 4.49.0, != 4.50.0, >= 4.64.0, >= 6.19.0, >= 7.0.0, < 8.0.0" - hashes = [ - "h1:XXIN2CJQVPeX2up/Fc0fxgvFvKPZ8n7EMYfHgUg19Qg=", - "zh:1bb1991d8518c1a48bbb0bc13c2f8a3ff4087f04f0290c088bd409c45372fac0", - "zh:3e90926772a7336773d0746b369881723356f8f9bcdc61f0c5edf6cf877c528a", - "zh:6024ab68e49b04ffc413fcb17d702775182f51277c55e05843ae2b954c430825", - "zh:84580559e0652fb64e51255f62c0c7be15fe04b9299e9d37af0edc88da3e9620", - "zh:85adff74d4cee4a75695821d4221dd1ace7ff72c9b38c0dd645542f0341ed875", - "zh:985b01b93156c57b7f8d7d5f141166e0c880a4cafcbd159d2913b79d803c42e6", - "zh:a62e67d8652ab9209d15b1ce91a7128805756fb5ab59a8da31198d3bcb7ab6f6", - "zh:a92a6fa87f39315a1d6b13a504e9f3bea0b71ffe42a3caefa78556748ea4b5c7", - "zh:bbfd9dca8c2bfeb92dcec8816de04692c91169aa97a7aabc2dd37191a861927e", - "zh:d90dad9c4ccba06404b49c0d2c1cbf866a3fa64f5b6b2a17ee6f4711892544cb", - "zh:e05f2ff63373f81b93e82b4c1aebebf620cfb368aeca7b1629590aa0f92c288a", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - ] -} - -provider "registry.terraform.io/hashicorp/google-beta" { - version = "7.7.0" - constraints = ">= 4.64.0, >= 6.19.0, < 8.0.0" - hashes = [ - "h1:WGz3ddOpHsRrti0cChTB/nA97D2JB0lBm6qq2OGyemg=", - "zh:0a8171641254cc40e3e36c3051dbd647345cddeceaa093cc5a5b546337d56c9c", - "zh:0dd3a9aade731fe67750700d98a0e8c01f5af16fd3d226618515ed209b901fd4", - "zh:2368351c948f8c0ebde61f8b1e966b2ef35482b85e7dffc3bbb9728dd0b06cd8", - "zh:2bc33fec30fa72a2d6dcfe79f00fa7dd4613df675e54bf4e0fe1152ab48143a9", - "zh:372531f20e72e62472d98d4c90cf84bf21aa4af035cb529d7705d07f1c7eb3b3", - "zh:3815b9b785447f6b89f2d98d12d9dd3ee5f0492faf002c99ff5bb0f0bbc10b6f", - "zh:38b557dea7a767dcbc2efa2e078950c3b0b33000d1d12f03ea8e31e0219eed86", - "zh:b3a23230bfebdc92dcdfc01f21928467a2e301545dfbb2f91b46bfb63dc9c2b2", - "zh:c8a9d6daaaa9b01dcf25f9c0615104bd9e7aeaa1d343d812ab3df8e824ca56fa", - "zh:d07001dfec4cb07e82ac6d25bc5aa33ef9a936b802c1e03ce53408fc1fe86c11", - "zh:df6d1aa1de7de33688dd6f0658cb8a5601386cd6aea5d7b51cd02e6c5d15503b", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - ] -} - -provider "registry.terraform.io/hashicorp/kubernetes" { - version = "2.38.0" - constraints = "~> 2.10" - hashes = [ - "h1:soK8Lt0SZ6dB+HsypFRDzuX/npqlMU6M0fvyaR1yW0k=", - "zh:0af928d776eb269b192dc0ea0f8a3f0f5ec117224cd644bdacdc682300f84ba0", - "zh:1be998e67206f7cfc4ffe77c01a09ac91ce725de0abaec9030b22c0a832af44f", - "zh:326803fe5946023687d603f6f1bab24de7af3d426b01d20e51d4e6fbe4e7ec1b", - "zh:4a99ec8d91193af961de1abb1f824be73df07489301d62e6141a656b3ebfff12", - "zh:5136e51765d6a0b9e4dbcc3b38821e9736bd2136cf15e9aac11668f22db117d2", - "zh:63fab47349852d7802fb032e4f2b6a101ee1ce34b62557a9ad0f0f0f5b6ecfdc", - "zh:924fb0257e2d03e03e2bfe9c7b99aa73c195b1f19412ca09960001bee3c50d15", - "zh:b63a0be5e233f8f6727c56bed3b61eb9456ca7a8bb29539fba0837f1badf1396", - "zh:d39861aa21077f1bc899bc53e7233262e530ba8a3a2d737449b100daeb303e4d", - "zh:de0805e10ebe4c83ce3b728a67f6b0f9d18be32b25146aa89116634df5145ad4", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - "zh:faf23e45f0090eef8ba28a8aac7ec5d4fdf11a36c40a8d286304567d71c1e7db", - ] -} - -provider "registry.terraform.io/hashicorp/local" { - version = "2.5.3" - hashes = [ - "h1:MCzg+hs1/ZQ32u56VzJMWP9ONRQPAAqAjuHuzbyshvI=", - "zh:284d4b5b572eacd456e605e94372f740f6de27b71b4e1fd49b63745d8ecd4927", - 
"zh:40d9dfc9c549e406b5aab73c023aa485633c1b6b730c933d7bcc2fa67fd1ae6e", - "zh:6243509bb208656eb9dc17d3c525c89acdd27f08def427a0dce22d5db90a4c8b", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:885d85869f927853b6fe330e235cd03c337ac3b933b0d9ae827ec32fa1fdcdbf", - "zh:bab66af51039bdfcccf85b25fe562cbba2f54f6b3812202f4873ade834ec201d", - "zh:c505ff1bf9442a889ac7dca3ac05a8ee6f852e0118dd9a61796a2f6ff4837f09", - "zh:d36c0b5770841ddb6eaf0499ba3de48e5d4fc99f4829b6ab66b0fab59b1aaf4f", - "zh:ddb6a407c7f3ec63efb4dad5f948b54f7f4434ee1a2607a49680d494b1776fe1", - "zh:e0dafdd4500bec23d3ff221e3a9b60621c5273e5df867bc59ef6b7e41f5c91f6", - "zh:ece8742fd2882a8fc9d6efd20e2590010d43db386b920b2a9c220cfecc18de47", - "zh:f4c6b3eb8f39105004cf720e202f04f57e3578441cfb76ca27611139bc116a82", - ] -} - -provider "registry.terraform.io/hashicorp/random" { - version = "3.7.2" - constraints = ">= 2.1.0" - hashes = [ - "h1:KG4NuIBl1mRWU0KD/BGfCi1YN/j3F7H4YgeeM7iSdNs=", - "zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f", - "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc", - "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab", - "zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3", - "zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212", - "zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34", - "zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967", - "zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d", - "zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62", - "zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0", - ] -} - -provider "registry.terraform.io/integrations/github" { - version = "6.6.0" - hashes = [ - "h1:P4SRG4605PvPKASeDu1lW49TTz1cCGsjQ7qbOBgNd6I=", - "zh:0b1b5342db6a17de7c71386704e101be7d6761569e03fb3ff1f3d4c02c32d998", - "zh:2fb663467fff76852126b58315d9a1a457e3b04bec51f04bf1c0ddc9dfbb3517", - "zh:4183e557a1dfd413dae90ca4bac37dbbe499eae5e923567371f768053f977800", - "zh:48b2979f88fb55cdb14b7e4c37c44e0dfbc21b7a19686ce75e339efda773c5c2", - "zh:5d803fb06625e0bcf83abb590d4235c117fa7f4aa2168fa3d5f686c41bc529ec", - "zh:6f1dd094cbab36363583cda837d7ca470bef5f8abf9b19f23e9cd8b927153498", - "zh:772edb5890d72b32868f9fdc0a9a1d4f4701d8e7f8acb37a7ac530d053c776e3", - "zh:798f443dbba6610431dcef832047f6917fb5a4e184a3a776c44e6213fb429cc6", - "zh:cc08dfcc387e2603f6dbaff8c236c1254185450d6cadd6bad92879fe7e7dbce9", - "zh:d5e2c8d7f50f91d6847ddce27b10b721bdfce99c1bbab42a68fa271337d73d63", - "zh:e69a0045440c706f50f84a84ff8b1df520ec9bf757de4b8f9959f2ed20c3f440", - "zh:efc5358573a6403cbea3a08a2fcd2407258ac083d9134c641bdcb578966d8bdf", - "zh:f627a255e5809ec2375f79949c79417847fa56b9e9222ea7c45a463eb663f137", - "zh:f7c02f762e4cf1de7f58bde520798491ccdd54a5bd52278d579c146d1d07d4f0", - "zh:fbd1fee2c9df3aa19cf8851ce134dea6e45ea01cb85695c1726670c285797e25", - ] -} diff --git a/terraform/gcp/create_graph.sh b/terraform/gcp/create_graph.sh deleted file mode 100755 index 2356390..0000000 --- a/terraform/gcp/create_graph.sh +++ /dev/null @@ -1,266 +0,0 @@ -#/bin/bash -set -euo pipefail - -APOLLO_KEY=${APOLLO_KEY:-""} -CLUSTER_PREFIX=${CLUSTER_PREFIX:-"apollo-supergraph-k8s"} -ACCOUNT_ID=${ACCOUNT_ID:-""} -GRAPH_ID=$CLUSTER_PREFIX-$(echo $RANDOM | shasum | head -c 6) -HEADER=${HEADER:-""} -VARIANTS=("dev" 
"prod") - -if [[ "$APOLLO_KEY" == "" ]]; then - echo "Must provide APOLLO_KEY in environment" 1>&2 - exit 1 -fi - -if [[ $(which jq) == "" ]]; then - echo "please install jq before continuing: https://stedolan.github.io/jq/" - exit 1 -fi - -if [[ $(which rover) == "" ]]; then - echo "rover not installed; see: https://www.apollographql.com/docs/rover/getting-started/" - exit 1 -fi - -# if an account id is not provided, fetch it from Studio -if [[ $ACCOUNT_ID == "" ]]; then - ACCOUNT_ARGS=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --url 'https://graphql.api.apollographql.com/api/graphql' - --data '{"query":"{ me { ... on User { memberships { permission account { id } } } } }"}' - ) - - if [[ $HEADER != "" ]]; then - ACCOUNT_ARGS+=(--header "$HEADER") - fi - - ACCOUNT_RESP=$(curl "${ACCOUNT_ARGS[@]}") - ACCOUNT_COUNT=$(echo $ACCOUNT_RESP | jq -r ".data.me.memberships | length") - - # if more than one account exists, ask user to choose - if [[ $ACCOUNT_COUNT > 1 ]]; then - echo "Apollo Studio returned more than one account." - echo "Please select an account to use:" - echo "" - - # Store account IDs in array - ACCOUNT_IDS=($(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[].account.id")) - - # Display numbered list of accounts - INDEX=1 - for account_id in "${ACCOUNT_IDS[@]}"; do - echo " $INDEX) $account_id" - ((INDEX++)) - done - - echo "" - read -p "Enter the number of the account to use (1-$ACCOUNT_COUNT): " SELECTION - - # Validate selection - if [[ ! "$SELECTION" =~ ^[0-9]+$ ]] || [[ "$SELECTION" -lt 1 ]] || [[ "$SELECTION" -gt $ACCOUNT_COUNT ]]; then - echo "Invalid selection. Please run the script again and choose a valid number." - exit 1 - fi - - ACCOUNT_ID=${ACCOUNT_IDS[$((SELECTION - 1))]} - echo "Selected account: $ACCOUNT_ID" - else - ACCOUNT_ID=$(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[0].account.id") - fi -fi - -echo "Creating graph $GRAPH_ID on account $ACCOUNT_ID..." - -CREATE_ARGS=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --url 'https://graphql.api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreateGraph(\$accountId: ID!, \$newServiceId: ID!, \$name: String, \$onboardingArchitecture: OnboardingArchitecture) { newService(accountId: \$accountId, id: \$newServiceId, name: \$name, onboardingArchitecture: \$onboardingArchitecture) { id apiKeys { token } } }\",\"variables\":{\"accountId\":\"$ACCOUNT_ID\",\"newServiceId\":\"$GRAPH_ID\",\"name\":\"Build a Supergraph $(date +"%Y-%m-%d")\",\"onboardingArchitecture\":\"SUPERGRAPH\"}}" -) - -if [[ $HEADER != "" ]]; then - CREATE_ARGS+=(--header "$HEADER") -fi - -CREATE_RESP=$(curl "${CREATE_ARGS[@]}") - -IS_SUCCESS=$(echo $CREATE_RESP | jq -r ".data.newService") -if [[ "$IS_SUCCESS" == "null" ]]; then - echo "Error creating graph" - echo $CREATE_RESP | jq . - exit 1 -fi - -GRAPH_KEY=$(echo $CREATE_RESP | jq -r ".data.newService.apiKeys[0].token") - -# Create Operator API key for the operator to use -echo "Creating Operator API key..." 
- -CREATE_OPERATOR_KEY_ARGS=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header "apollographql-client-name: reference-architecture" - --header "apollographql-client-version: 1.0" - --header 'content-type: application/json' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreateOperatorKey(\$name: String!, \$type: GraphOsKeyType!, \$organizationId: ID!) { organization(id: \$organizationId) { createKey(name: \$name, type: \$type) { id keyName expiresAt token } } }\",\"variables\":{\"name\":\"operator\",\"type\":\"OPERATOR\",\"organizationId\":\"$ACCOUNT_ID\"}}" -) - -CREATE_OPERATOR_KEY_RESP=$(curl "${CREATE_OPERATOR_KEY_ARGS[@]}") - -OPERATOR_KEY=$(echo $CREATE_OPERATOR_KEY_RESP | jq -r ".data.organization.createKey.token") -if [[ "$OPERATOR_KEY" == "null" ]]; then - echo "Error creating operator key" - echo $CREATE_OPERATOR_KEY_RESP | jq . - exit 1 -fi - -echo "Operator key created successfully" - -# Note: Subgraph schema publishing is now handled by the Apollo GraphOS Operator -# when Subgraph CRDs are deployed. No manual rover publish commands needed. -# We create variants by publishing dummy subgraphs to them. - -echo "Creating dev and prod variants by publishing dummy subgraphs..." - -for variant in "${VARIANTS[@]}"; do - echo "Creating variant: $variant" - - PUBLISH_ARGS=( - --silent - --header "x-api-key: $GRAPH_KEY" - --header "apollographql-client-name: reference-architecture" - --header "apollographql-client-version: 1.0" - --header 'content-type: application/json' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation PublishSubgraph(\$graphId: ID!, \$graphVariant: String!, \$name: String!, \$revision: String!, \$activePartialSchema: PartialSchemaInput!, \$url: String) { graph(id: \$graphId) { publishSubgraph(graphVariant: \$graphVariant, name: \$name, revision: \$revision, activePartialSchema: \$activePartialSchema, url: \$url) { subgraphsCreated errors { message locations { column line } code } wasCreated wasUpdated } } }\",\"variables\":{\"graphId\":\"$GRAPH_ID\",\"graphVariant\":\"$variant\",\"name\":\"temp-subgraph\",\"revision\":\"1\",\"activePartialSchema\":{\"sdl\":\"type Query { temp: String }\"},\"url\":\"http://localhost:1234\"}}" - ) - - PUBLISH_RESP=$(curl "${PUBLISH_ARGS[@]}") - - if [[ $(echo $PUBLISH_RESP | jq -r ".data.graph.publishSubgraph.errors | length") > 0 ]]; then - echo "Error creating variant $variant" - echo $PUBLISH_RESP | jq . - exit 1 - fi - - echo "Created variant: $variant" -done - -# Create persisted query lists for dev and prod -# dev -CREATE_PQ_ARGS_DEV=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" -) - -if [[ $HEADER != "" ]]; then - CREATE_PQ_ARGS_DEV+=(--header "$HEADER") -fi - -CREATE_PQ_DEV_RESP=$(curl "${CREATE_PQ_ARGS_DEV[@]}") - -IS_SUCCESS=$(echo $CREATE_PQ_DEV_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList") -if [[ "$IS_SUCCESS" == "null" ]]; then - echo "Error creating pq list for dev" - echo $CREATE_PQ_DEV_RESP | jq . 
- exit 1 -fi - -DEV_PQ_ID=$(echo $CREATE_PQ_DEV_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList.id") - -UPDATE_DEV_PQ_LIST_ARGS=( - --silent - --request POST - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$DEV_PQ_ID\",\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" -) - -if [[ $HEADER != "" ]]; then - UPDATE_DEV_PQ_LIST_ARGS+=(--header "$HEADER") -fi - -UPDATE_DEV_PQ_LIST_RESP=$(curl "${UPDATE_DEV_PQ_LIST_ARGS[@]}") - -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_DEV_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for dev" - echo $UPDATE_DEV_PQ_LIST_RESP | jq . - exit 1 -fi - -# prod -CREATE_PQ_ARGS_PROD=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" -) - -if [[ $HEADER != "" ]]; then - CREATE_PQ_ARGS_DEV+=(--header "$HEADER") -fi - -CREATE_PQ_PROD_RESP=$(curl "${CREATE_PQ_ARGS_PROD[@]}") -IS_SUCCESS=$(echo $CREATE_PQ_PROD_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList") -if [[ "$IS_SUCCESS" == "null" ]]; then - echo "Error creating pq list for prod" - echo $CREATE_PQ_PROD_RESP | jq . - exit 1 -fi - -PROD_PQ_ID=$(echo $CREATE_PQ_PROD_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList.id") - -UPDATE_PROD_PQ_LIST_ARGS=( - --silent - --request POST - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... 
on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$PROD_PQ_ID\",\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" -) -if [[ $HEADER != "" ]]; then - UPDATE_PROD_PQ_LIST_ARGS+=(--header "$HEADER") -fi - -UPDATE_PROD_PQ_LIST_RESP=$(curl "${UPDATE_PROD_PQ_LIST_ARGS[@]}") - -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_PROD_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for prod" - echo $UPDATE_PROD_PQ_LIST_RESP | jq . - exit 1 -fi - -echo '' -echo "Adding Apollo credentials as Terraform variables in .env..." -echo '' >> .env -echo "export TF_VAR_apollo_key=\"$GRAPH_KEY\"" >> .env -echo "export TF_VAR_apollo_graph_id=\"$GRAPH_ID\"" >> .env -echo "export TF_VAR_pq_dev_id=\"$DEV_PQ_ID\"" >> .env -echo "export TF_VAR_pq_prod_id=\"$PROD_PQ_ID\"" >> .env -echo "export OPERATOR_KEY=\"$OPERATOR_KEY\"" >> .env -echo '' >> .env -echo 'Re-run `source .env` to load them.' diff --git a/terraform/gcp/github.tf b/terraform/gcp/github.tf deleted file mode 100644 index 5fc1514..0000000 --- a/terraform/gcp/github.tf +++ /dev/null @@ -1,72 +0,0 @@ -provider "github" { - token = var.github_token -} - -# Infra repo for Router, Otel, load testing -resource "github_repository" "repo" { - name = "implemented-reference-architecture" - description = "Apollo supergraph reference architecture repository" - visibility = "public" - depends_on = [ - module.gke - ] - template { - owner = "andywgarcia" - repository = "reference-architecture-1" - } -} - -### Github -> GKE Serivce Accounts and credentials ### - -# "Service Agent" credentials for infra repo (so it can manage more resources like cluster roles) -resource "google_service_account" "github-manage-gsa" { - project = var.project_id - account_id = "${substr(var.demo_name, 0, 12)}-github-manage-gsa" - display_name = "${substr(var.demo_name, 0, 12)}-github-manage-gsa" -} -resource "google_project_iam_member" "github-manage-admin" { - project = var.project_id - role = "roles/container.serviceAgent" - member = "serviceAccount:${google_service_account.github-manage-gsa.email}" -} -resource "google_service_account_key" "github-manage-key" { - service_account_id = google_service_account.github-manage-gsa.name -} -resource "local_file" "github-manage-key" { - content = base64decode(google_service_account_key.github-manage-key.private_key) - filename = "${path.module}/github-manage-key.json" -} - -### GH Action Secrets ### - -# infra repo secrets: the only different value is GCP_CREDENTIALS -resource "github_actions_secret" "apollo_graph_id" { - repository = github_repository.repo.name - secret_name = "APOLLO_GRAPH_ID" - plaintext_value = var.apollo_graph_id -} -resource "github_actions_secret" "apollo_key" { - repository = github_repository.repo.name - secret_name = "APOLLO_KEY" - plaintext_value = var.apollo_key -} -resource "github_actions_secret" "pq_dev_id" { - repository = github_repository.repo.name - secret_name = "APOLLO_PQ_DEV_ID" - plaintext_value = var.pq_dev_id -} -resource "github_actions_secret" "pq_prod_id" { - repository = github_repository.repo.name - secret_name = "APOLLO_PQ_PROD_ID" - plaintext_value = var.pq_prod_id -} -resource "github_actions_secret" "cluster_prefix" { - repository = github_repository.repo.name - secret_name = "CLUSTER_PREFIX" - plaintext_value = var.demo_name -} -resource 
"github_actions_secret" "gcp_secret" { - repository = github_repository.repo.name - secret_name = "GCP_CREDENTIALS" - plaintext_value = base64decode(google_service_account_key.github-manage-key.private_key) -} diff --git a/terraform/gcp/gke.tf b/terraform/gcp/gke.tf deleted file mode 100644 index 5531fae..0000000 --- a/terraform/gcp/gke.tf +++ /dev/null @@ -1,46 +0,0 @@ -# For each stage in `var.demo_stages`, create a Kubernetes cluster. -# -# The clusters a generally configured the same. The subnet IP ranges, node -# instance type, and node counts are configurable per cluster. -# -# The clusters are named `{demo_name}-{stage}`, e.g. "apollo-supergraph-k8s-dev". -module "gke_auth" { - source = "terraform-google-modules/kubernetes-engine/google//modules/auth" - for_each = { - for index, stage in var.demo_stages : stage.name => stage - } - depends_on = [module.gke] - project_id = var.project_id - location = module.gke[each.key].location - cluster_name = module.gke[each.key].name -} - -module "gke" { - source = "terraform-google-modules/kubernetes-engine/google" - for_each = { - for index, stage in var.demo_stages : stage.name => stage - } - - project_id = var.project_id - name = "${var.demo_name}-${each.value.name}" - regional = true - region = var.project_region - disable_legacy_metadata_endpoints = true - - network = module.gcp-network[each.key].network_name - subnetwork = module.gcp-network[each.key].subnets_names[0] - ip_range_pods = "${var.demo_name}-${each.value.name}-pods" - ip_range_services = "${var.demo_name}-${each.value.name}-services" - node_pools = [ - { - name = "${each.value.name}-node-pool" - machine_type = each.value.node_type - min_count = each.value.min_nodes - max_count = each.value.max_nodes - disk_size_gb = 20 - }, - ] - node_pools_tags = { - all = ["gke-node", "${var.project_id}-gke"] - } -} diff --git a/terraform/gcp/main.tf b/terraform/gcp/main.tf deleted file mode 100644 index 279fea3..0000000 --- a/terraform/gcp/main.tf +++ /dev/null @@ -1,15 +0,0 @@ -terraform { - required_providers { - google = { - source = "hashicorp/google" - } - github = { - source = "integrations/github" - } - } -} - -provider "google" { - project = var.project_id - region = var.project_region -} diff --git a/terraform/gcp/metrics.tf b/terraform/gcp/metrics.tf deleted file mode 100644 index b7bc8c9..0000000 --- a/terraform/gcp/metrics.tf +++ /dev/null @@ -1,18 +0,0 @@ -# Google Service account; for more, see: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/googlecloudexporter -resource "google_service_account" "metrics-writer" { - project = var.project_id - account_id = "${substr(var.demo_name, 0, 12)}-metrics-writer" - display_name = "${substr(var.demo_name, 0, 12)}-metrics-writer" -} - -resource "google_project_iam_member" "cloud-trace-iam" { - project = var.project_id - role = "roles/cloudtrace.agent" - member = "serviceAccount:${google_service_account.metrics-writer.email}" - -} -resource "google_project_iam_member" "cloud-metrics-iam" { - project = var.project_id - role = "roles/monitoring.metricWriter" - member = "serviceAccount:${google_service_account.metrics-writer.email}" -} diff --git a/terraform/gcp/outputs.tf b/terraform/gcp/outputs.tf deleted file mode 100644 index 8c81c23..0000000 --- a/terraform/gcp/outputs.tf +++ /dev/null @@ -1,11 +0,0 @@ -output "kubernetes_cluster_names" { - value = { - for k, v in module.gke : k => v.name - } - description = "Cluster names for each stage" -} - -output "repo" { - value = github_repository.repo.html_url 
- description = "Respository URL" -} diff --git a/terraform/gcp/setup_clusters.sh b/terraform/gcp/setup_clusters.sh deleted file mode 100755 index 8f563b9..0000000 --- a/terraform/gcp/setup_clusters.sh +++ /dev/null @@ -1,123 +0,0 @@ -#/bin/bash -set -euxo pipefail - -# default vars -CLUSTER_PREFIX=${CLUSTER_PREFIX:-"apollo-supergraph-k8s"} -PROJECT_REGION=${PROJECT_REGION:-"us-east1"} -PROJECT_CLUSTERS=("${CLUSTER_PREFIX}-dev" "${CLUSTER_PREFIX}-prod") -# end default vars - -if [[ $(which gcloud) == "" ]]; then - echo "gcloud not installed" - exit 1 -fi - -if [[ $(which kubectl) == "" ]]; then - echo "kubectl not installed" - exit 1 -fi - -if [[ $(which kubectx) == "" ]]; then - echo "kubectx not installed" - exit 1 -fi - -if [[ -z "$PROJECT_ID" ]]; then - echo "Must provide PROJECT_ID in environment" 1>&2 - exit 1 -fi - -environment_setup(){ - echo "Configuring Kubeconfig for ${1}..." - gcloud container clusters get-credentials ${1} --zone ${PROJECT_REGION} --project ${PROJECT_ID} - - # short context aliases: supports `kubectx apollo-supergraph-k8s-dev` - kubectx ${1}=. - - # monitoring setup: namespace, service account, and binding - # the service account name matches the otel collector's service account in its helm chart - kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f - - kubectl create serviceaccount -n "monitoring" "metrics-writer" --dry-run=client -o yaml | kubectl apply -f - - kubectl annotate serviceaccount -n "monitoring" "metrics-writer" "iam.gke.io/gcp-service-account=${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" --overwrite - gcloud iam service-accounts add-iam-policy-binding \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${PROJECT_ID}.svc.id.goog[monitoring/metrics-writer]" \ - "${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" - - # Apollo GraphOS Operator setup - echo "Installing Apollo GraphOS Operator..." - kubectl create namespace apollo-operator --dry-run=client -o yaml | kubectl apply -f - - kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - - - # Create operator API key secret (requires OPERATOR_KEY to be set) - if [[ -n "$OPERATOR_KEY" ]]; then - kubectl create secret generic apollo-api-key \ - --from-literal="APOLLO_KEY=$OPERATOR_KEY" \ - -n apollo-operator \ - --dry-run=client -o yaml | kubectl apply -f - - echo "Operator API key secret created" - else - echo "Warning: OPERATOR_KEY not set. Operator secret not created." - fi - - # Create GitHub Container Registry image pull secret (optional, requires TF_VAR_github_token) - if [[ -n "$TF_VAR_github_token" && -n "$GITHUB_ORG" ]]; then - echo "Creating GitHub Container Registry image pull secret..." 
- # Create in default namespace - kubectl create secret docker-registry ghcr-secret \ - --docker-server=ghcr.io \ - --docker-username="$GITHUB_ORG" \ - --docker-password="$TF_VAR_github_token" \ - --namespace=default \ - --dry-run=client -o yaml | kubectl apply -f - - - # Create in apollo namespace - kubectl create secret docker-registry ghcr-secret \ - --docker-server=ghcr.io \ - --docker-username="$GITHUB_ORG" \ - --docker-password="$TF_VAR_github_token" \ - --namespace=apollo \ - --dry-run=client -o yaml | kubectl apply -f - - - # Create in apollo-operator namespace and patch service account - kubectl create secret docker-registry ghcr-secret \ - --docker-server=ghcr.io \ - --docker-username="$GITHUB_ORG" \ - --docker-password="$TF_VAR_github_token" \ - --namespace=apollo-operator \ - --dry-run=client -o yaml | kubectl apply -f - - - kubectl patch serviceaccount apollo-operator -n apollo-operator \ - -p '{"imagePullSecrets":[{"name":"ghcr-secret"}]}' || true - - echo "GitHub Container Registry image pull secret created" - else - echo "Warning: TF_VAR_github_token and/or GITHUB_ORG not set. Image pull secret not created." - echo " Subgraphs may fail to pull images if they are private. Set these variables to enable image pull authentication." - fi - - # Install operator using Helm - if [[ $(which helm) != "" ]]; then - helm upgrade --install --atomic apollo-operator \ - oci://registry-1.docker.io/apollograph/operator-chart \ - -n apollo-operator \ - --create-namespace \ - -f - < stage - } - project_id = var.project_id - network_name = "${var.demo_name}-${each.value.name}" - subnets = [ - { - subnet_name = "${var.demo_name}-${each.value.name}" - subnet_ip = each.value.subnet_range - subnet_region = var.project_region - } - ] - secondary_ranges = { - "${var.demo_name}-${each.value.name}" = [ - { - ip_cidr_range = each.value.ip_range_pods - range_name = "${var.demo_name}-${each.value.name}-pods" - }, - { - ip_cidr_range = each.value.ip_range_services - range_name = "${var.demo_name}-${each.value.name}-services" - } - ] - } -} diff --git a/terraform/minikube/github.tf b/terraform/minikube/github.tf deleted file mode 100644 index e72f9ef..0000000 --- a/terraform/minikube/github.tf +++ /dev/null @@ -1,38 +0,0 @@ -provider "github" { - token = var.github_token -} - -# Infra repo for Router, Otel, load testing -resource "github_repository" "repo" { - name = "reference-architecture" - description = "Apollo reference architecture repository" - visibility = "public" - template { - owner = "apollosolutions" - repository = "reference-architecture" - } -} - -### GH Action Secrets ### - -# infra repo secrets: the only different value is GCP_CREDENTIALS -resource "github_actions_secret" "apollo_graph_id" { - repository = github_repository.repo.name - secret_name = "APOLLO_GRAPH_ID" - plaintext_value = var.apollo_graph_id -} -resource "github_actions_secret" "apollo_key" { - repository = github_repository.repo.name - secret_name = "APOLLO_KEY" - plaintext_value = var.apollo_key -} -resource "github_actions_secret" "pq_dev_id" { - repository = github_repository.repo.name - secret_name = "APOLLO_PQ_DEV_ID" - plaintext_value = var.pq_dev_id -} -resource "github_actions_secret" "pq_prod_id" { - repository = github_repository.repo.name - secret_name = "APOLLO_PQ_PROD_ID" - plaintext_value = var.pq_prod_id -} diff --git a/terraform/minikube/main.tf b/terraform/minikube/main.tf deleted file mode 100644 index fbe53be..0000000 --- a/terraform/minikube/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -terraform { - 
required_providers { - github = { - source = "integrations/github" - } - } -} diff --git a/terraform/minikube/outputs.tf b/terraform/minikube/outputs.tf deleted file mode 100644 index dc356f7..0000000 --- a/terraform/minikube/outputs.tf +++ /dev/null @@ -1,12 +0,0 @@ -output "repo" { - value = github_repository.repo.html_url - description = "Infra (router, o11y) repo URLs" -} - -resource "local_file" "repo_env_file"{ - content = < Date: Mon, 10 Nov 2025 12:17:44 -0800 Subject: [PATCH 09/31] Working minikube --- README.md | 65 +++++- client/docker/nginx/conf.d/default.conf | 16 ++ deploy/client/templates/ingress.yaml | 16 +- deploy/client/values.yaml | 1 + deploy/operator-resources/supergraph-dev.yaml | 2 +- docs/cleanup.md | 206 +++++++----------- docs/setup.md | 87 ++++++-- scripts/minikube/.env.sample | 18 ++ scripts/minikube/05-deploy-subgraphs.sh | 4 +- .../minikube/06-deploy-operator-resources.sh | 2 +- scripts/minikube/07-deploy-ingress.sh | 158 +++++++++----- scripts/minikube/08-deploy-client.sh | 99 ++++----- 12 files changed, 405 insertions(+), 269 deletions(-) create mode 100644 scripts/minikube/.env.sample diff --git a/README.md b/README.md index 7d6992c..2932015 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,70 @@ Once the architecture is fully stood up, you'll have: ### The ending architecture -![Software Development Life Cycle](/images/sdlc.png) +```mermaid +graph TB + subgraph "Minikube Cluster" + subgraph "Client Namespace" + Client[React Client
Apollo Client] + end + + subgraph "Apollo Namespace" + Router[Apollo Router
Managed by Operator] + Operator[Apollo GraphOS Operator] + SupergraphSchema[SupergraphSchema CRD] + Supergraph[Supergraph CRD] + end + + subgraph "Subgraph Namespaces" + Checkout[Checkout Subgraph] + Discovery[Discovery Subgraph] + Inventory[Inventory Subgraph] + Orders[Orders Subgraph] + Products[Products Subgraph] + Reviews[Reviews Subgraph] + Shipping[Shipping Subgraph] + Users[Users Subgraph] + end + + Ingress[NGINX Ingress Controller] + end + + subgraph "External Services" + GraphOS[Apollo GraphOS Studio
Schema Composition] + end + + Client -->|HTTP| Ingress + Ingress -->|HTTP| Router + Router -->|GraphQL| Checkout + Router -->|GraphQL| Discovery + Router -->|GraphQL| Inventory + Router -->|GraphQL| Orders + Router -->|GraphQL| Products + Router -->|GraphQL| Reviews + Router -->|GraphQL| Shipping + Router -->|GraphQL| Users + + Operator -->|Manages| SupergraphSchema + Operator -->|Manages| Supergraph + Operator -->|Publishes Schemas| GraphOS + GraphOS -->|Composed Schema| SupergraphSchema + SupergraphSchema -->|Schema Reference| Supergraph + Supergraph -->|Deploys| Router + + Checkout -.->|Schema via CRD| Operator + Discovery -.->|Schema via CRD| Operator + Inventory -.->|Schema via CRD| Operator + Orders -.->|Schema via CRD| Operator + Products -.->|Schema via CRD| Operator + Reviews -.->|Schema via CRD| Operator + Shipping -.->|Schema via CRD| Operator + Users -.->|Schema via CRD| Operator + + style Router fill:#e1f5ff + style Operator fill:#fff4e1 + style GraphOS fill:#e8f5e9 + style Client fill:#f3e5f5 +``` ### Prerequisites diff --git a/client/docker/nginx/conf.d/default.conf b/client/docker/nginx/conf.d/default.conf index 7b7cb6c..5c849f7 100644 --- a/client/docker/nginx/conf.d/default.conf +++ b/client/docker/nginx/conf.d/default.conf @@ -3,6 +3,22 @@ server { root /usr/share/nginx/html; index index.html; + # Proxy GraphQL requests to the router service + # The service name will be set via environment variable at build time + # The trailing slash in proxy_pass strips /graphql and sends to / (where Apollo Router serves) + location /graphql { + proxy_pass http://${ROUTER_SERVICE_NAME}.apollo.svc.cluster.local:80/; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache_bypass $http_upgrade; + } + + # Serve static files for the React app location / { try_files $uri $uri/ /index.html; } diff --git a/deploy/client/templates/ingress.yaml b/deploy/client/templates/ingress.yaml index a5ff711..e77251d 100644 --- a/deploy/client/templates/ingress.yaml +++ b/deploy/client/templates/ingress.yaml @@ -23,9 +23,15 @@ metadata: {{- toYaml . 
| nindent 4 }} {{- end }} spec: - defaultBackend: - service: - name: {{ .Values.ingress.name }} - port: - number: {{ $svcPort }} + ingressClassName: {{ .Values.ingress.className }} + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ .Values.ingress.name }} + port: + number: {{ $svcPort }} {{- end }} diff --git a/deploy/client/values.yaml b/deploy/client/values.yaml index ddcd1cd..fdca0ca 100644 --- a/deploy/client/values.yaml +++ b/deploy/client/values.yaml @@ -23,6 +23,7 @@ service: ingress: enabled: true + className: nginx name: web tls: [] diff --git a/deploy/operator-resources/supergraph-dev.yaml b/deploy/operator-resources/supergraph-dev.yaml index 807f6f8..2cf03d9 100644 --- a/deploy/operator-resources/supergraph-dev.yaml +++ b/deploy/operator-resources/supergraph-dev.yaml @@ -4,7 +4,7 @@ metadata: name: reference-architecture-dev namespace: apollo spec: - replicas: 1 + replicas: 3 podTemplate: routerVersion: 2.7.0 resources: diff --git a/docs/cleanup.md b/docs/cleanup.md index e254fa8..1323fe1 100644 --- a/docs/cleanup.md +++ b/docs/cleanup.md @@ -1,184 +1,126 @@ -## Cleanup +# Cleanup -⏱ Estimated time: 15 minutes +⏱ Estimated time: 5 minutes -Running Google Cloud or AWS resources will continue to incur costs on your account so we have documented all the steps to take for a proper tear-down. +This guide covers cleaning up all resources deployed to your local Minikube cluster. -### Automated cleanup +## Delete Operator-Managed Resources -### Delete Operator-Managed Resources +Before deleting Kubernetes resources, first remove the operator-managed CRDs. Make sure you have your `ENVIRONMENT` variable set (or load it from `.env`): -Before deleting Kubernetes resources, first remove the operator-managed CRDs. 
**The following steps are provided for both dev and prod clusters:** +```bash +# Load environment variables if needed +if [ -f .env ]; then + source .env +fi -```sh -# Start with dev cluster -kubectx apollo-supergraph-k8s-dev +ENVIRONMENT=${ENVIRONMENT:-dev} +RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" +``` + +Delete operator-managed resources: +```bash # Delete Supergraph resources (this deletes the router deployment) -kubectl delete supergraphs reference-architecture-dev -n apollo +kubectl delete supergraphs ${RESOURCE_NAME} -n apollo || true # Delete SupergraphSchema resources -kubectl delete supergraphschemas reference-architecture-dev -n apollo +kubectl delete supergraphschemas ${RESOURCE_NAME} -n apollo || true -# Delete Ingress resources for the router +# Delete Ingress resources kubectl delete ingress router -n apollo || true +kubectl delete ingress client -n client || true # Delete Subgraph resources (this will also stop schema publishing) -kubectl delete subgraph --all --all-namespaces +kubectl delete subgraph --all --all-namespaces || true +``` + +## Uninstall Helm Releases + +Uninstall all Helm releases: -# Uninstall Helm releases -helm uninstall coprocessor -n apollo -helm uninstall client -n client +```bash +# Uninstall client (if deployed) +helm uninstall client -n client || true -# Uninstall subgraph Helm releases before deleting namespaces +# Uninstall coprocessor (if deployed) +helm uninstall coprocessor -n apollo || true + +# Uninstall subgraph Helm releases for subgraph in checkout discovery inventory orders products reviews shipping users; do helm uninstall $subgraph -n $subgraph || true done +``` + +## Delete Namespaces + +Delete all application namespaces: -# Delete subgraph namespaces (each subgraph has its own namespace) -kubectl delete namespace checkout discovery inventory orders products reviews shipping users +```bash +# Delete subgraph namespaces +kubectl delete namespace checkout discovery inventory orders products reviews shipping users || true # Delete client namespace -kubectl delete namespace client +kubectl delete namespace client || true # Delete operator API key secret (contains sensitive data) -# Note: Helm release secrets (sh.helm.release.v1.*) are automatically cleaned up by helm uninstall kubectl delete secret apollo-api-key -n apollo-operator || true # Uninstall the Apollo GraphOS Operator -# This will also automatically clean up Helm release secrets (sh.helm.release.v1.*) -helm uninstall apollo-operator -n apollo-operator +helm uninstall apollo-operator -n apollo-operator || true # Delete operator namespaces -kubectl delete namespace apollo-operator apollo - -# Repeat for prod cluster -kubectx apollo-supergraph-k8s-prod - -kubectl delete supergraphs reference-architecture-prod -n apollo -kubectl delete supergraphschemas reference-architecture-prod -n apollo -kubectl delete ingress router -n apollo || true -kubectl delete subgraph --all --all-namespaces -helm uninstall coprocessor -n apollo -helm uninstall client -n client - -# Uninstall subgraph Helm releases before deleting namespaces -for subgraph in checkout discovery inventory orders products reviews shipping users; do - helm uninstall $subgraph -n $subgraph || true -done - -kubectl delete namespace checkout discovery inventory orders products reviews shipping users -kubectl delete namespace client - -# Delete operator API key secret (contains sensitive data) -# Note: Helm release secrets (sh.helm.release.v1.*) are automatically cleaned up by helm uninstall -kubectl delete secret 
apollo-api-key -n apollo-operator || true - -# Uninstall the Apollo GraphOS Operator -# This will also automatically clean up Helm release secrets (sh.helm.release.v1.*) -helm uninstall apollo-operator -n apollo-operator -kubectl delete namespace apollo-operator apollo +kubectl delete namespace apollo-operator apollo || true ``` -### Cloud-specific steps +## Clean Up Apollo GraphOS Resources (Optional) -There are a few cloud-specific steps you'll need to take. +If you want to clean up the Apollo GraphOS graph and variants you created: -#### GCP +1. Go to [Apollo GraphOS Studio](https://studio.apollographql.com) +2. Navigate to your graph +3. Delete the graph or specific variants as needed -**Clean up GCP Workload Identity bindings** (created during setup for monitoring): +**Note:** The operator API key created during setup will remain in your Apollo GraphOS account. You can delete it from [User Settings > API Keys](https://studio.apollographql.com/user-settings/api-keys) if desired. -```sh -# You'll need your PROJECT_ID and CLUSTER_PREFIX (default: apollo-supergraph-k8s) -CLUSTER_PREFIX=${CLUSTER_PREFIX:-"apollo-supergraph-k8s"} -PROJECT_ID="" +## Delete Minikube Cluster (Optional) -# Remove workload identity binding (shared across dev and prod clusters) -# Note: This only needs to be run once, not per cluster -gcloud iam service-accounts remove-iam-policy-binding \ - "${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${PROJECT_ID}.svc.id.goog[monitoring/metrics-writer]" || true -``` +If you want to completely remove the Minikube cluster: -**Note:** The GCP IAM service account `${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com` may be created by Terraform. If it's not removed by `terraform destroy`, you can delete it manually: +```bash +# Stop the cluster +minikube stop -```sh -gcloud iam service-accounts delete "${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" || true +# Delete the cluster +minikube delete ``` -In order to delete some non-Kubernetes resources created by Google Cloud, it's easiest to just delete everything: +Or if you have multiple Minikube profiles and want to delete all: -```sh -kubectx apollo-supergraph-k8s-dev -kubectl delete daemonsets,replicasets,services,deployments,pods,rc,ingress --all --all-namespaces +```bash +minikube delete --all ``` -The command may hang at the end. 
You can kill the process (`ctrl-c`) and repeat with the prod cluster: - -```sh -kubectx apollo-supergraph-k8s-prod -kubectl delete daemonsets,replicasets,services,deployments,pods,rc,ingress --all --all-namespaces -``` +## Clean Up Local Docker Images (Optional) -#### AWS - -In order to ensure the load balancers are properly removed, and the IAM service roles are removed, run the following, replacing `apollo-supergraph-k8s` with the appropriate cluster prefix if modified: - -```sh -# dev -eksctl delete iamserviceaccount \ - --cluster=apollo-supergraph-k8s-dev \ - --name="aws-load-balancer-controller" -aws cloudformation delete-stack --stack-name eksctl-apollo-supergraph-k8s-dev-addon-iamserviceaccount-kube-system-aws-load-balancer-controller -# prod -eksctl delete iamserviceaccount \ - --cluster=apollo-supergraph-k8s-prod \ - --name="aws-load-balancer-controller" -aws cloudformation delete-stack --stack-name eksctl-apollo-supergraph-k8s-prod-addon-iamserviceaccount-kube-system-aws-load-balancer-controller -``` +If you want to remove the local Docker images built for this project: -### Delete Monitoring Resources +```bash +# Configure Docker to use Minikube's daemon (if cluster is still running) +eval $(minikube docker-env) -The monitoring namespace may contain additional resources (InfluxDB, Grafana, Zipkin, etc.) that should be cleaned up. **Repeat these steps for both dev and prod clusters:** - -```sh -# Start with dev cluster -kubectx apollo-supergraph-k8s-dev - -# Uninstall monitoring components (if deployed) -helm uninstall influxdb -n monitoring || true -helm uninstall grafana -n monitoring || true -helm uninstall otel-collector -n monitoring || true -helm uninstall zipkin -n zipkin || true - -# Delete monitoring namespaces -kubectl delete namespace monitoring zipkin || true - -# Repeat for prod cluster -kubectx apollo-supergraph-k8s-prod - -helm uninstall influxdb -n monitoring || true -helm uninstall grafana -n monitoring || true -helm uninstall otel-collector -n monitoring || true -helm uninstall zipkin -n zipkin || true -kubectl delete namespace monitoring zipkin || true +# Remove local images +docker rmi checkout:local discovery:local inventory:local orders:local \ + products:local reviews:local shipping:local users:local \ + coprocessor:local client:local || true ``` -### Remaining steps - -Then you can destroy all the provisioned resources (Kubernetes clusters, GitHub repositories) with terraform: - -```sh -cd terraform/ -terraform destroy # takes roughly 10 minutes -``` +## Clean Up Environment Variables (Optional) -Lastly, you can remove the contexts from your `kubectl`: +If you want to remove the `.env` file created during setup: -```sh -kubectl config delete-context apollo-supergraph-k8s-dev -kubectl config delete-context apollo-supergraph-k8s-prod +```bash +rm .env ``` -Terraform does not delete the Docker containers from GitHub. Visit `https://github.com/?tab=packages` and delete the packages created by the previous versions of the repos. +**Note:** This will remove your Apollo GraphOS configuration. You'll need to run `02-setup-apollo-graph.sh` again if you want to recreate the graph. 
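For convenience, the individual cleanup steps above could be collected into a single helper script. The following is a minimal sketch, not part of the repository: it assumes the default names used in this guide (`reference-architecture-${ENVIRONMENT}`, the per-subgraph namespaces, and a local `.env` file) and simply runs the same commands in order.

```bash
#!/bin/bash
# cleanup.sh — hypothetical helper that runs the teardown steps in order.
set -uo pipefail   # deliberately no -e: keep going even if some resources are already gone

[ -f .env ] && source .env
ENVIRONMENT=${ENVIRONMENT:-dev}
RESOURCE_NAME="reference-architecture-${ENVIRONMENT}"
SUBGRAPHS="checkout discovery inventory orders products reviews shipping users"

# Operator-managed resources first, so the operator stops reconciling them
kubectl delete supergraphs "${RESOURCE_NAME}" -n apollo || true
kubectl delete supergraphschemas "${RESOURCE_NAME}" -n apollo || true
kubectl delete ingress router -n apollo || true
kubectl delete ingress client -n client || true
kubectl delete subgraph --all --all-namespaces || true

# Helm releases
helm uninstall client -n client || true
helm uninstall coprocessor -n apollo || true
for s in $SUBGRAPHS; do helm uninstall "$s" -n "$s" || true; done

# Namespaces, secrets, and the operator itself
kubectl delete namespace $SUBGRAPHS client || true
kubectl delete secret apollo-api-key -n apollo-operator || true
helm uninstall apollo-operator -n apollo-operator || true
kubectl delete namespace apollo-operator apollo || true

echo "Cluster-side cleanup finished; run 'minikube delete' if you also want to remove the cluster."
```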
diff --git a/docs/setup.md b/docs/setup.md index bb4aacd..2d2635a 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -163,6 +163,7 @@ kubectl describe supergraph reference-architecture-${ENVIRONMENT} -n apollo This script: - Deploys an Ingress resource for external access +- Configures the ingress controller as LoadBalancer for `minikube tunnel` support - Provides access URLs for the router ### Script 08: Deploy Client (Optional) @@ -179,45 +180,97 @@ This script: After running all scripts, you can access your supergraph in several ways: -### Option 1: Using Ingress IP +### Option 1: Using Minikube Tunnel (recommended for LoadBalancer access) -If ingress is configured, get the IP: +The ingress controller has been configured as a LoadBalancer service. To access it via `minikube tunnel`: ```bash -kubectl get ingress router -n apollo +# In a separate terminal, run: +minikube tunnel ``` -Then access at `http://` +**Important notes:** +- Enter your sudo password when prompted +- You may see a message "Starting tunnel for service router" - **this can be safely ignored** +- The "router" is an Ingress resource (not a service), so it doesn't need tunneling +- Only the `ingress-nginx-controller` LoadBalancer service needs tunneling +- Wait for the "Status: running" message +- Access the router at: `http://127.0.0.1/` -### Option 2: Using Minikube Service +**Why you see "router" in the tunnel output:** +The ingress controller automatically sets a LoadBalancer status on Ingress resources, which makes `minikube tunnel` think it needs to tunnel them. However, since the ingress controller is already being tunneled, the router is accessible through it. You can safely ignore this message. + +### Option 2: Using Port Forwarding + +Port forward directly to the router service: ```bash -minikube service reference-architecture-${ENVIRONMENT} -n apollo +kubectl port-forward service/reference-architecture-${ENVIRONMENT} -n apollo 4000:80 ``` -This will open the router in your default browser. +Then access at `http://localhost:4000` in your browser. + +**Note:** Keep the port-forward command running in a terminal while you access the router. + +### Option 3: Using Ingress via NodePort -### Option 3: Using Port Forwarding +Get the Minikube IP and ingress controller NodePort: ```bash -kubectl port-forward service/reference-architecture-${ENVIRONMENT} -n apollo 4000:80 +MINIKUBE_IP=$(minikube ip) +NODEPORT=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}') +echo "Access at: http://${MINIKUBE_IP}:${NODEPORT}" ``` -Then access at `http://localhost:4000` +**Note:** This method may not work reliably on macOS due to network routing. Use Option 1 (minikube tunnel) instead. ### Verify Router is Working -Test the router health endpoint: +Test the router with a simple GraphQL query: ```bash -curl http://localhost:4000/.well-known/apollo/server-health +# If using minikube service, it will show you the URL to use +# If using port forwarding, use http://localhost:4000 +curl -X POST http://localhost:4000 \ + -H "Content-Type: application/json" \ + -d '{"query":"{ __typename }"}' ``` -Or visit the router in Apollo Studio: -1. Go to [Apollo GraphOS Studio](https://studio.apollographql.com) -2. Select your graph -3. Navigate to the variant (e.g., "dev") -4. 
View the router status and metrics +Or test the health endpoint (if accessible on the main port): + +```bash +curl http://localhost:4000/health +``` + +## Step 5: Logging Into the Client Application + +If you deployed the client application (script 08), you can log in using the following test credentials: + +### Test Users + +The application includes three test users: + +| Username | Password | Email | Notes | +|----------|----------|-------|-------| +| `user1` | Any non-empty password | user1@contoso.org | Has 2 credit cards, cart with items | +| `user2` | Any non-empty password | user2@contoso.org | Has 1 debit card, cart with items | +| `user3` | Any non-empty password | user3@contoso.org | Has debit card and bank account, empty cart | + +### Login Instructions + +1. Navigate to the client application (typically at `http://127.0.0.1/` if using minikube tunnel) +2. Click "Login" in the navigation menu +3. Enter one of the test usernames (e.g., `user1`) +4. Enter any non-empty password (e.g., `password`) +5. Optionally enter scopes (comma-separated, e.g., `user:read:email`) +6. Click "Sign In" + +**Note:** The password validation only checks that it's not empty. Any non-empty password will work for authentication. + +### Scopes + +Scopes are optional but can be used to control access to certain fields: +- `user:read:email` - Allows reading the user's email address ## Creating Additional Environments diff --git a/scripts/minikube/.env.sample b/scripts/minikube/.env.sample new file mode 100644 index 0000000..36e3b55 --- /dev/null +++ b/scripts/minikube/.env.sample @@ -0,0 +1,18 @@ +# Environment Variables for Minikube Setup +# Copy this file to .env and fill in your values + +# Apollo GraphOS Personal API Key (required) +# Get it from: https://studio.apollographql.com/user-settings/api-keys +export APOLLO_KEY="your-apollo-personal-api-key" + +# Environment name (required) +# Change this to create a different environment (e.g., "prod", "staging") +export ENVIRONMENT="dev" + +# Cluster prefix for naming (optional, defaults to "apollo-supergraph-k8s") +export CLUSTER_PREFIX="apollo-supergraph-k8s" + +# The following variables will be set automatically by 02-setup-apollo-graph.sh: +# export APOLLO_GRAPH_ID="" +# export APOLLO_KEY="" # Graph API key (different from personal API key) +# export OPERATOR_KEY="" diff --git a/scripts/minikube/05-deploy-subgraphs.sh b/scripts/minikube/05-deploy-subgraphs.sh index 74bdda4..ca5639f 100755 --- a/scripts/minikube/05-deploy-subgraphs.sh +++ b/scripts/minikube/05-deploy-subgraphs.sh @@ -76,8 +76,8 @@ for subgraph in "${SUBGRAPHS[@]}"; do exit 1 fi - # Read schema and escape for YAML - SCHEMA_CONTENT=$(cat "$SCHEMA_FILE" | sed 's/^/ /') + # Read schema and indent for YAML (6 spaces to be indented relative to 'sdl:') + SCHEMA_CONTENT=$(cat "$SCHEMA_FILE" | sed 's/^/ /') # Create Subgraph CRD YAML cat </dev/null || echo "") - if [ -n "$INGRESS_IP" ]; then - break +# Change ingress controller service to LoadBalancer for minikube tunnel support +echo "Configuring ingress controller for minikube tunnel..." +kubectl patch svc ingress-nginx-controller -n ingress-nginx -p '{"spec":{"type":"LoadBalancer"}}' 2>/dev/null || true + +# Note: We don't create a router ingress here because: +# 1. The client's nginx will proxy /graphql requests to the router service internally +# 2. This avoids ingress conflicts (both router and client can't use path /) +# 3. 
The router is accessed via Kubernetes service DNS from within the cluster + +echo "Note: Router ingress is not needed - the client's nginx will proxy /graphql requests to the router service" + +# Get router URL - use localhost for minikube tunnel (LoadBalancer) or NodePort fallback +echo "Getting router URL..." +MINIKUBE_IP=$(minikube ip) +INGRESS_NODEPORT=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}' 2>/dev/null || echo "") + +# Check if ingress controller is LoadBalancer (for minikube tunnel) +INGRESS_TYPE=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.spec.type}' 2>/dev/null || echo "") + +# Use localhost for LoadBalancer (minikube tunnel), or NodePort, or default to localhost:4000 for port-forward +# Note: The client's nginx will proxy /graphql requests to the router +if [ "$INGRESS_TYPE" == "LoadBalancer" ]; then + ROUTER_URL="http://127.0.0.1/graphql" + echo "Using localhost URL for minikube tunnel: $ROUTER_URL" + echo "Note: Run 'minikube tunnel' in a separate terminal to access the router" + echo "Note: The client's nginx will proxy /graphql requests to the router service" +elif [ -n "$INGRESS_NODEPORT" ]; then + ROUTER_URL="http://${MINIKUBE_IP}:${INGRESS_NODEPORT}/graphql" + echo "Using ingress NodePort URL: $ROUTER_URL" +else + # Default to localhost for port-forward (user will need to run port-forward separately) + ROUTER_URL="http://localhost:4000/graphql" + echo "Using default localhost URL (use 'kubectl port-forward' to access): $ROUTER_URL" +fi + +# Save to .env file +ENV_FILE=".env" +if [ ! -f "$ENV_FILE" ]; then + touch "$ENV_FILE" +fi + +# Remove old ROUTER_URL if it exists and add new one +if grep -q "^export ROUTER_URL=" "$ENV_FILE"; then + # Use a temp file for sed compatibility across platforms + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' "s|^export ROUTER_URL=.*|export ROUTER_URL=\"$ROUTER_URL\"|" "$ENV_FILE" + else + sed -i "s|^export ROUTER_URL=.*|export ROUTER_URL=\"$ROUTER_URL\"|" "$ENV_FILE" fi - echo " Waiting for ingress IP... ($i/30)" - sleep 2 -done +else + echo "" >> "$ENV_FILE" + echo "# Router URL (generated by 07-deploy-ingress.sh)" >> "$ENV_FILE" + echo "export ROUTER_URL=\"$ROUTER_URL\"" >> "$ENV_FILE" +fi -if [ -z "$INGRESS_IP" ]; then - echo "" - echo "Error: Ingress did not get an IP address after waiting" - echo "This may indicate an issue with the ingress controller" +echo "Router URL saved to .env file: $ROUTER_URL" + +# Get the NodePort for the ingress controller (for reference) +NODEPORT=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}') + +echo "" +echo "✓ Ingress deployed successfully!" +echo "" +echo "Router URL saved to .env file: $ROUTER_URL" +echo "" +if [ "$INGRESS_TYPE" == "LoadBalancer" ]; then + echo "⚠️ IMPORTANT: The router URL has been set to $ROUTER_URL for minikube tunnel." + echo " You MUST run 'minikube tunnel' for the router to be accessible." + echo " If you prefer a different access method, update ROUTER_URL in .env file." echo "" - echo "Troubleshooting:" - echo " 1. Check ingress controller status: kubectl get pods -n ingress-nginx" - echo " 2. Check ingress status: kubectl describe ingress router -n apollo" - echo " 3. 
Try restarting ingress: minikube addons disable ingress && minikube addons enable ingress" - exit 1 fi - +echo "To access the router, use one of these methods:" +echo "" +if [ "$INGRESS_TYPE" == "LoadBalancer" ]; then +echo "Option 1: Use minikube tunnel (REQUIRED - URL is set in .env for this method):" +echo " 1. In a separate terminal, run: minikube tunnel" +echo " 2. Enter your sudo password when prompted" +echo " 3. You may see 'Starting tunnel for service router' - this can be ignored" +echo " 4. Wait for 'Status: running' message" +echo " 5. Access the client UI at: http://127.0.0.1/" +echo " 6. GraphQL requests will be proxied to the router via /graphql" echo "" -echo "✓ Router is accessible at:" -echo " http://${INGRESS_IP}" +if [ -n "$NODEPORT" ]; then + echo "Option 2: Access via NodePort (requires updating ROUTER_URL in .env):" + echo " Client UI: http://${MINIKUBE_IP}:${NODEPORT}/" + echo " GraphQL: http://${MINIKUBE_IP}:${NODEPORT}/graphql" + echo " Then update .env: export ROUTER_URL=\"http://${MINIKUBE_IP}:${NODEPORT}/graphql\"" + echo "" +fi +echo "Option 3: Port forward (requires updating ROUTER_URL in .env):" +echo " kubectl port-forward service/reference-architecture-${ENVIRONMENT} -n apollo 4000:80" +echo " Then update .env: export ROUTER_URL=\"http://localhost:4000/graphql\"" + echo "" +else + echo "Option 1: Access via NodePort:" + echo " http://${MINIKUBE_IP}:${NODEPORT}" + echo "" + echo "Option 2: Port forward:" + echo " kubectl port-forward service/reference-architecture-${ENVIRONMENT} -n apollo 4000:80" + echo " Then access at: http://localhost:4000" + echo "" +fi +echo "Note: The ingress controller service has been configured as LoadBalancer" +echo "to support minikube tunnel. The router is accessed via the client's nginx proxy." echo "" -echo "You can access the router at the IP above. If you want to use a hostname instead," -echo "you can add this to your /etc/hosts file:" -echo " ${INGRESS_IP} router.local" +echo "The router URL has been saved to .env and will be used by the client deployment." echo "" -echo "Then access at: http://router.local" +echo "Next step: Run 08-deploy-client.sh to deploy the client application (optional)" + diff --git a/scripts/minikube/08-deploy-client.sh b/scripts/minikube/08-deploy-client.sh index 5e1c1f5..623e7d2 100755 --- a/scripts/minikube/08-deploy-client.sh +++ b/scripts/minikube/08-deploy-client.sh @@ -38,17 +38,14 @@ if ! kubectl cluster-info &> /dev/null; then exit 1 fi -# Get router URL for backend configuration -RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" -INGRESS_IP=$(kubectl get ingress router -n apollo -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "") - -if [ -z "$INGRESS_IP" ]; then - echo "Error: Router ingress not found or has no IP address" - echo "Please ensure script 07-deploy-ingress.sh completed successfully" +# Get router URL from .env file +if [[ -z "${ROUTER_URL:-}" ]]; then + echo "Error: ROUTER_URL is not set" + echo "Please run 07-deploy-ingress.sh first to set up the router URL" exit 1 fi -BACKEND_URL="http://${INGRESS_IP}" +BACKEND_URL="$ROUTER_URL" echo "Using backend URL: $BACKEND_URL" # Create client namespace @@ -61,10 +58,28 @@ if [ ! -d "client" ]; then fi # Build client with BACKEND_URL if Dockerfile supports it +RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" if [ -f "client/Dockerfile" ] && grep -q "BACKEND_URL" "client/Dockerfile"; then echo "Building client image with BACKEND_URL=$BACKEND_URL..." 
+ echo "Note: The client's nginx will proxy /graphql requests to the router service" eval $(minikube docker-env) - docker build --build-arg BACKEND_URL="$BACKEND_URL" -t client:local client + # Backup original nginx config + cp client/docker/nginx/conf.d/default.conf client/docker/nginx/conf.d/default.conf.bak + # Replace placeholder with actual service name (handle both macOS and Linux sed) + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' "s/\${ROUTER_SERVICE_NAME}/$RESOURCE_NAME/g" client/docker/nginx/conf.d/default.conf + else + sed -i "s/\${ROUTER_SERVICE_NAME}/$RESOURCE_NAME/g" client/docker/nginx/conf.d/default.conf + fi + # Verify the replacement worked + if grep -q "\${ROUTER_SERVICE_NAME}" client/docker/nginx/conf.d/default.conf; then + echo "Warning: Placeholder replacement may have failed. Checking config..." + cat client/docker/nginx/conf.d/default.conf + fi + # Build without cache to ensure nginx config is included + docker build --no-cache --build-arg BACKEND_URL="$BACKEND_URL" -t client:local client + # Restore original nginx config + mv client/docker/nginx/conf.d/default.conf.bak client/docker/nginx/conf.d/default.conf fi # Install using Helm @@ -73,35 +88,18 @@ helm upgrade --install client "deploy/client" \ -n client \ --wait -# Deploy ingress for client -echo "Deploying ingress for client..." -cat </dev/null || echo "") + CLIENT_IP=$(kubectl get ingress ${CLIENT_INGRESS_NAME} -n client -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "") if [ -n "$CLIENT_IP" ]; then break fi @@ -111,24 +109,17 @@ done if [ -z "$CLIENT_IP" ]; then echo "" - echo "Error: Client ingress did not get an IP address after waiting" - echo "This may indicate an issue with the ingress controller" + echo "Warning: Client ingress did not get an IP address after waiting" + echo "The ingress may still be configuring. Check status with:" + echo " kubectl get ingress ${CLIENT_INGRESS_NAME} -n client" + echo "" +else + echo "" + echo "✓ Client is accessible at:" + echo " http://${CLIENT_IP}" + echo "" + echo "If using minikube tunnel, access at: http://127.0.0.1/" + echo "(The client ingress uses the same LoadBalancer as the router)" echo "" - echo "Troubleshooting:" - echo " 1. Check ingress controller status: kubectl get pods -n ingress-nginx" - echo " 2. Check ingress status: kubectl describe ingress client -n client" - exit 1 fi -echo "" -echo "✓ Client is accessible at:" -echo " http://${CLIENT_IP}" -echo "" -echo "You can access the client at the IP above. If you want to use a hostname instead," -echo "you can add this to your /etc/hosts file:" -echo " ${CLIENT_IP} client.local" -echo "" -echo "Then access at: http://client.local" -echo "" -echo "✓ Client deployment complete!" - From 1cb0149522674b26e467920b5e25b781365f80f7 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Mon, 10 Nov 2025 12:24:20 -0800 Subject: [PATCH 10/31] Refine cleanup and operator guide documentation by removing outdated comments and improving clarity in resource management steps. Ensure consistency in environment variable usage across setup instructions. 
--- docs/cleanup.md | 28 ---------------------------- docs/operator-guide.md | 25 ------------------------- docs/setup.md | 19 ++++++++----------- 3 files changed, 8 insertions(+), 64 deletions(-) diff --git a/docs/cleanup.md b/docs/cleanup.md index 1323fe1..896f9c8 100644 --- a/docs/cleanup.md +++ b/docs/cleanup.md @@ -9,7 +9,6 @@ This guide covers cleaning up all resources deployed to your local Minikube clus Before deleting Kubernetes resources, first remove the operator-managed CRDs. Make sure you have your `ENVIRONMENT` variable set (or load it from `.env`): ```bash -# Load environment variables if needed if [ -f .env ]; then source .env fi @@ -21,17 +20,10 @@ RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" Delete operator-managed resources: ```bash -# Delete Supergraph resources (this deletes the router deployment) kubectl delete supergraphs ${RESOURCE_NAME} -n apollo || true - -# Delete SupergraphSchema resources kubectl delete supergraphschemas ${RESOURCE_NAME} -n apollo || true - -# Delete Ingress resources kubectl delete ingress router -n apollo || true kubectl delete ingress client -n client || true - -# Delete Subgraph resources (this will also stop schema publishing) kubectl delete subgraph --all --all-namespaces || true ``` @@ -40,13 +32,8 @@ kubectl delete subgraph --all --all-namespaces || true Uninstall all Helm releases: ```bash -# Uninstall client (if deployed) helm uninstall client -n client || true - -# Uninstall coprocessor (if deployed) helm uninstall coprocessor -n apollo || true - -# Uninstall subgraph Helm releases for subgraph in checkout discovery inventory orders products reviews shipping users; do helm uninstall $subgraph -n $subgraph || true done @@ -57,19 +44,10 @@ done Delete all application namespaces: ```bash -# Delete subgraph namespaces kubectl delete namespace checkout discovery inventory orders products reviews shipping users || true - -# Delete client namespace kubectl delete namespace client || true - -# Delete operator API key secret (contains sensitive data) kubectl delete secret apollo-api-key -n apollo-operator || true - -# Uninstall the Apollo GraphOS Operator helm uninstall apollo-operator -n apollo-operator || true - -# Delete operator namespaces kubectl delete namespace apollo-operator apollo || true ``` @@ -88,10 +66,7 @@ If you want to clean up the Apollo GraphOS graph and variants you created: If you want to completely remove the Minikube cluster: ```bash -# Stop the cluster minikube stop - -# Delete the cluster minikube delete ``` @@ -106,10 +81,7 @@ minikube delete --all If you want to remove the local Docker images built for this project: ```bash -# Configure Docker to use Minikube's daemon (if cluster is still running) eval $(minikube docker-env) - -# Remove local images docker rmi checkout:local discovery:local inventory:local orders:local \ products:local reviews:local shipping:local users:local \ coprocessor:local client:local || true diff --git a/docs/operator-guide.md b/docs/operator-guide.md index 14a1896..fca84f5 100644 --- a/docs/operator-guide.md +++ b/docs/operator-guide.md @@ -85,13 +85,8 @@ This matches any Subgraph CRD with the `apollo.io/subgraph` label, regardless of ### Check Subgraph Status ```bash -# List all subgraphs kubectl get subgraph --all-namespaces - -# Describe a specific subgraph kubectl describe subgraph checkout -n checkout - -# Watch subgraph status kubectl get subgraph -w ``` @@ -100,10 +95,7 @@ Look for `SchemaLoaded` condition in the status to verify schema extraction. 
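As a quick way to read that condition directly, a jsonpath query can be used. This is only a sketch: it assumes the Subgraph CRD reports a standard Kubernetes `conditions` array containing a `SchemaLoaded` type, and that each subgraph is named after its namespace as in this reference architecture; both may differ by operator version.

```bash
# Print the SchemaLoaded condition for one subgraph (assumed condition type)
kubectl get subgraph checkout -n checkout \
  -o jsonpath='{.status.conditions[?(@.type=="SchemaLoaded")].status}{"\n"}'

# Or scan every subgraph and flag any that have not reported a loaded schema
for ns in checkout discovery inventory orders products reviews shipping users; do
  status=$(kubectl get subgraph "$ns" -n "$ns" \
    -o jsonpath='{.status.conditions[?(@.type=="SchemaLoaded")].status}' 2>/dev/null)
  echo "$ns: ${status:-unknown}"
done
```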
### Check Composition Status ```bash -# Get SupergraphSchema status kubectl get supergraphschema -n apollo - -# Describe for detailed status kubectl describe supergraphschema reference-architecture-dev -n apollo ``` @@ -115,10 +107,7 @@ Status conditions: ### Check Router Deployment ```bash -# Get supergraph status kubectl get supergraph -n apollo - -# Describe for detailed status kubectl describe supergraph reference-architecture-dev -n apollo ``` @@ -144,14 +133,7 @@ When you update a subgraph schema and redeploy the image: If you need to manually trigger composition: ```bash -# Edit the SupergraphSchema kubectl edit supergraphschema reference-architecture-dev -n apollo - -# Temporarily disable composition -# Set: compositionEnabled: false - -# Save and exit, then re-enable -# Set: compositionEnabled: true (or remove the field) ``` ## Troubleshooting @@ -202,10 +184,7 @@ Look for: ### Viewing Router Logs ```bash -# Get router pods kubectl get pods -n apollo - -# View logs kubectl logs -n apollo deployment/reference-architecture-{dev|prod} ``` @@ -214,11 +193,7 @@ kubectl logs -n apollo deployment/reference-architecture-{dev|prod} To update router configuration without changing subgraphs: ```bash -# Edit the Supergraph CRD kubectl edit supergraph reference-architecture-dev -n apollo - -# Update spec.podTemplate.router.configuration -# Save and the operator will roll out the changes ``` Changes are applied via rolling update - the operator manages the rollout. diff --git a/docs/setup.md b/docs/setup.md index 2d2635a..1c829e8 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -58,7 +58,7 @@ cp scripts/minikube/.env.sample .env ```bash export APOLLO_KEY="your-apollo-personal-api-key" -export ENVIRONMENT="dev" # Required: e.g., "dev", "prod", "staging" +export ENVIRONMENT="dev" ``` The `ENVIRONMENT` variable is required and allows you to create multiple environments. Each environment will have its own Apollo GraphOS variant. @@ -97,7 +97,7 @@ This script: ### Script 03: Setup Kubernetes Cluster ```bash -source .env # Load the variables set by script 02 +source .env ./scripts/minikube/03-setup-cluster.sh ``` @@ -185,7 +185,6 @@ After running all scripts, you can access your supergraph in several ways: The ingress controller has been configured as a LoadBalancer service. To access it via `minikube tunnel`: ```bash -# In a separate terminal, run: minikube tunnel ``` @@ -229,8 +228,6 @@ echo "Access at: http://${MINIKUBE_IP}:${NODEPORT}" Test the router with a simple GraphQL query: ```bash -# If using minikube service, it will show you the URL to use -# If using port forwarding, use http://localhost:4000 curl -X POST http://localhost:4000 \ -H "Content-Type: application/json" \ -d '{"query":"{ __typename }"}' @@ -285,13 +282,13 @@ export ENVIRONMENT="prod" 2. 
Run scripts 02-07 again with the new environment: ```bash -./scripts/minikube/02-setup-apollo-graph.sh # Creates prod variant +./scripts/minikube/02-setup-apollo-graph.sh source .env -./scripts/minikube/03-setup-cluster.sh # Uses same cluster -./scripts/minikube/04-build-images.sh # Reuses images -./scripts/minikube/05-deploy-subgraphs.sh # Deploys to prod namespaces -./scripts/minikube/06-deploy-operator-resources.sh # Creates prod router -./scripts/minikube/07-deploy-ingress.sh # Updates ingress +./scripts/minikube/03-setup-cluster.sh +./scripts/minikube/04-build-images.sh +./scripts/minikube/05-deploy-subgraphs.sh +./scripts/minikube/06-deploy-operator-resources.sh +./scripts/minikube/07-deploy-ingress.sh ``` Each environment will have: From bd30ced166a054fa2da7d13fbcd2049c37028e76 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Mon, 10 Nov 2025 12:25:50 -0800 Subject: [PATCH 11/31] Add example for updating a single subgraph in operator guide documentation, detailing steps for schema update and monitoring progress. --- docs/operator-guide.md | 44 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/docs/operator-guide.md b/docs/operator-guide.md index fca84f5..e239ca5 100644 --- a/docs/operator-guide.md +++ b/docs/operator-guide.md @@ -128,6 +128,50 @@ When you update a subgraph schema and redeploy the image: 6. Supergraph fetches the new composed schema 7. Router is rolled out with the new schema +### Example: Updating a Single Subgraph + +To update a single subgraph (e.g., `products`), follow these steps: + +```bash +SUBGRAPH="products" +ENVIRONMENT="dev" + +eval $(minikube docker-env) +docker build -t "${SUBGRAPH}:local" "subgraphs/${SUBGRAPH}" + +SCHEMA_FILE="subgraphs/${SUBGRAPH}/schema.graphql" +SCHEMA_CONTENT=$(cat "$SCHEMA_FILE" | sed 's/^/ /') + +cat < Date: Mon, 10 Nov 2025 12:30:55 -0800 Subject: [PATCH 12/31] Refactor ProductCard and Products components for improved layout and responsiveness. Update styles for better visual hierarchy and user experience, including adjustments to flex properties, padding, and grid configurations. --- .../src/components/Products/ProductCard.tsx | 38 +++++++++++-------- client/src/components/Products/Products.tsx | 16 ++++++-- client/src/routes/root.tsx | 9 +++-- 3 files changed, 40 insertions(+), 23 deletions(-) diff --git a/client/src/components/Products/ProductCard.tsx b/client/src/components/Products/ProductCard.tsx index d6819fc..13f449d 100644 --- a/client/src/components/Products/ProductCard.tsx +++ b/client/src/components/Products/ProductCard.tsx @@ -28,31 +28,36 @@ export default function ProductCard(props: ProductCardProps) { bg="navy.400" borderRadius={'xl'} color="beige.400" + height="100%" + display="flex" + flexDirection="column" + overflow="hidden" > - +
My alt message
- - - {product.title} - - + + + + {product.title} + + {Array.from({ length: 5 }, (_, i) => ( ))} - {productDescription} + {productDescription} - - - ${product?.variants[0].price} - + + + ${product?.variants[0].price} + diff --git a/client/src/components/Header/LoggedOutMenu.tsx b/client/src/components/Header/LoggedOutMenu.tsx index babe0e9..28180bd 100644 --- a/client/src/components/Header/LoggedOutMenu.tsx +++ b/client/src/components/Header/LoggedOutMenu.tsx @@ -18,6 +18,9 @@ export default function LoggedOutMenu() { size={'sm'} mr={4} leftIcon={} + as={Link} + to="/login" + title="Please log in to view your cart" > diff --git a/client/src/components/Products/ProductCard.tsx b/client/src/components/Products/ProductCard.tsx index 13f449d..898e9b1 100644 --- a/client/src/components/Products/ProductCard.tsx +++ b/client/src/components/Products/ProductCard.tsx @@ -15,13 +15,23 @@ import { import ShowMoreText from 'react-show-more-text' import { StarIcon } from '../Icons/Star' import { Product } from '../../apollo/types' +import { useCart } from '../../hooks/useCart' type ProductCardProps = { product: Product } export default function ProductCard(props: ProductCardProps) { const { product } = props + const { addToCart, addingToCart } = useCart() const productDescription = product.description.padEnd(125) + const firstVariant = product?.variants?.[0] + + const handleAddToCart = () => { + if (firstVariant?.id) { + addToCart(String(firstVariant.id), 1) + } + } + return ( - ${product?.variants[0].price} + ${firstVariant?.price || '0.00'} + + + + )} + + + ) +} + diff --git a/client/src/routes/cart/index.tsx b/client/src/routes/cart/index.tsx new file mode 100644 index 0000000..deffea7 --- /dev/null +++ b/client/src/routes/cart/index.tsx @@ -0,0 +1,22 @@ +import { Alert, Card, CardBody, Center } from '@chakra-ui/react' +import { useAuth } from '../../hooks/useAuth' +import Cart from './Cart' + +export const RouteComponent = () => { + const { user, isLoggedIn } = useAuth() + + if (!isLoggedIn || !user) { + return ( +
+ + + Please log in to view your cart + + +
+ ) + } + + return +} + diff --git a/client/src/routes/user-profile/UserProfileForm.tsx b/client/src/routes/user-profile/UserProfileForm.tsx index 177faaf..869cb52 100644 --- a/client/src/routes/user-profile/UserProfileForm.tsx +++ b/client/src/routes/user-profile/UserProfileForm.tsx @@ -87,7 +87,7 @@ export default function UserProfileForm({ user }: Props) { placeholder="UserName" _placeholder={{ color: 'gray.500' }} type="text" - defaultValue={data?.user?.username} + defaultValue={data?.me?.username} /> From c4b13f492e76e9e16a8c55f004e678435226317d Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Mon, 10 Nov 2025 13:36:48 -0800 Subject: [PATCH 14/31] Implement coprocessor for JWT authentication, update deployment scripts, and enhance router configuration. The coprocessor validates JWT tokens from the users subgraph's JWKS endpoint, enabling the `@authenticated` directive. Update setup documentation to reflect new deployment steps and ensure proper order of operations for deploying the coprocessor and router. --- coprocessor/package.json | 2 +- coprocessor/src/index.ts | 109 ++++++++++++++++-- deploy/operator-resources/router-config.yaml | 56 +++++++++ docs/setup.md | 38 ++++-- scripts/minikube/05-deploy-subgraphs.sh | 3 +- scripts/minikube/06-deploy-coprocessor.sh | 92 +++++++++++++++ ...ces.sh => 07-deploy-operator-resources.sh} | 32 ++++- ...deploy-ingress.sh => 08-deploy-ingress.sh} | 8 +- ...8-deploy-client.sh => 09-deploy-client.sh} | 6 +- 9 files changed, 315 insertions(+), 31 deletions(-) create mode 100644 deploy/operator-resources/router-config.yaml create mode 100755 scripts/minikube/06-deploy-coprocessor.sh rename scripts/minikube/{06-deploy-operator-resources.sh => 07-deploy-operator-resources.sh} (69%) rename scripts/minikube/{07-deploy-ingress.sh => 08-deploy-ingress.sh} (97%) rename scripts/minikube/{08-deploy-client.sh => 09-deploy-client.sh} (96%) diff --git a/coprocessor/package.json b/coprocessor/package.json index f630ae7..0d10d55 100644 --- a/coprocessor/package.json +++ b/coprocessor/package.json @@ -19,7 +19,7 @@ "license": "MIT", "dependencies": { "express": "^5.1.0", - "jsonwebtoken": "^9.0.0" + "jose": "^5.9.0" }, "devDependencies": { "@types/express": "^4.17.21", diff --git a/coprocessor/src/index.ts b/coprocessor/src/index.ts index 886d705..409efea 100644 --- a/coprocessor/src/index.ts +++ b/coprocessor/src/index.ts @@ -1,33 +1,124 @@ import express from "express"; import { CoprocessorRequest, CoprocessorStage } from "./types"; +import * as jose from 'jose'; + +const JWKS_URL = process.env.JWKS_URL || "http://graphql.users.svc.cluster.local:4001/.well-known/jwks.json"; +let jwksCache: ReturnType | null = null; +let jwksCacheTime: number = 0; +const JWKS_CACHE_TTL = 3600000; // 1 hour in milliseconds + +/** + * Fetches JWKS from the users subgraph service + */ +async function getJWKS(): Promise> { + const now = Date.now(); + + // Return cached JWKS if still valid + if (jwksCache && (now - jwksCacheTime) < JWKS_CACHE_TTL) { + return jwksCache; + } + + try { + const response = await fetch(JWKS_URL); + if (!response.ok) { + throw new Error(`Failed to fetch JWKS: ${response.statusText}`); + } + const jwks = await response.json(); + jwksCache = jose.createLocalJWKSet(jwks); + jwksCacheTime = now; + return jwksCache; + } catch (error) { + console.error("Error fetching JWKS:", error); + // Return cached JWKS if available, even if expired + if (jwksCache) { + return jwksCache; + } + throw error; + } +} + +/** + * Validates JWT token from authorization header + */ 
+async function validateToken(token: string | undefined): Promise { + if (!token) { + return false; + } + + // Extract token from "Bearer " format + const tokenValue = token.startsWith("Bearer ") ? token.substring(7) : token; + + try { + const jwks = await getJWKS(); + await jose.jwtVerify(tokenValue.trim(), jwks); + return true; + } catch (error) { + console.error("JWT validation error:", error); + return false; + } +} /** * Handles a coprocessor request - * Adds a "source" header to the request to all stages besides the SubgraphRequest stage + * Validates JWT authentication for RouterRequest stage + * Adds a "source" header to SubgraphRequest stage * * @param req - The request object * @param res - The response object */ -function handleCoprocessorRequest( +async function handleCoprocessorRequest( req: CoprocessorRequest, res: express.Response -): void { - if (req.body.stage !== CoprocessorStage.SUBGRAPH_REQUEST) { - res.json(req.body); +): Promise { + const payload = req.body; + + // Handle RouterRequest stage - validate authentication + if (payload.stage === CoprocessorStage.ROUTER_REQUEST) { + const authHeader = payload.headers.authorization?.[0]; + const isValid = await validateToken(authHeader); + + if (!isValid) { + // Return 401 Unauthorized if token is invalid or missing + res.json({ + ...payload, + control: { + break: 401, + }, + }); + return; + } + + // Token is valid, continue with the request + res.json({ + ...payload, + control: "continue", + }); return; } - const payload = req.body; - - payload.headers["source"] = ["coprocessor"]; + // Handle SubgraphRequest stage - add source header + if (payload.stage === CoprocessorStage.SUBGRAPH_REQUEST) { + payload.headers["source"] = ["coprocessor"]; + res.json(payload); + return; + } + // For all other stages, pass through unchanged res.json(payload); } const port = process.env.PORT || 8081; const app = express(); app.use(express.json()); -app.post("/", handleCoprocessorRequest); +app.post("/", async (req, res) => { + try { + await handleCoprocessorRequest(req as CoprocessorRequest, res); + } catch (error) { + console.error("Error handling coprocessor request:", error); + res.status(500).json({ error: "Internal server error" }); + } +}); app.listen(port, () => { console.log(`🚀 Coprocessor running on port ${port}`); + console.log(`JWKS URL: ${JWKS_URL}`); }); diff --git a/deploy/operator-resources/router-config.yaml b/deploy/operator-resources/router-config.yaml new file mode 100644 index 0000000..fa47782 --- /dev/null +++ b/deploy/operator-resources/router-config.yaml @@ -0,0 +1,56 @@ +# Router configuration for Apollo Router +# This file is loaded via ConfigMap and referenced in the Supergraph CRD +# Based on: https://github.com/apollosolutions/reference-architecture/blob/main/deploy/router/values.yaml + +supergraph: + listen: 0.0.0.0:4000 + introspection: true + +headers: + all: + request: + - propagate: + matching: .* + +authentication: + router: + jwt: + jwks: + - url: http://graphql.users.svc.cluster.local:4001/.well-known/jwks.json + +authorization: + directives: + enabled: true + +cors: + allow_any_origin: true + +coprocessor: + url: http://coprocessor.apollo.svc.cluster.local:8081 + timeout: 2s + router: + request: + headers: true + subgraph: + all: + request: + headers: true + response: + headers: true + +health_check: + listen: 0.0.0.0:8080 + +sandbox: + enabled: true + +homepage: + enabled: false + +include_subgraph_errors: + all: true + +plugins: + experimental: + expose_query_plan: true + diff --git a/docs/setup.md 
b/docs/setup.md index 1c829e8..822ac79 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -136,17 +136,35 @@ kubectl get subgraphs --all-namespaces kubectl get pods --all-namespaces ``` -### Script 06: Deploy Operator Resources +### Script 06: Deploy Coprocessor + +The coprocessor handles JWT authentication for the `@authenticated` directive and is required for the router to function properly. ```bash -./scripts/minikube/06-deploy-operator-resources.sh +./scripts/minikube/06-deploy-coprocessor.sh +``` + +This script: +- Builds the coprocessor Docker image (if not already built) +- Deploys the coprocessor using Helm +- Waits for coprocessor pods to be ready + +**Note:** The coprocessor validates JWT tokens from the users subgraph's JWKS endpoint and enables the `@authenticated` directive to work properly. It must be deployed before the router (script 07). + +### Script 07: Deploy Operator Resources + +```bash +./scripts/minikube/07-deploy-operator-resources.sh ``` This script: - Deploys SupergraphSchema CRD (triggers composition) - Deploys Supergraph CRD (deploys the Apollo Router) +- Configures the router to use the coprocessor (required) - Waits for the router to be ready +**Note:** The coprocessor (script 06) must be deployed before running this script, as the router configuration requires it. + Monitor router deployment: ```bash @@ -155,10 +173,10 @@ kubectl get pods -n apollo kubectl describe supergraph reference-architecture-${ENVIRONMENT} -n apollo ``` -### Script 07: Deploy Ingress +### Script 08: Deploy Ingress ```bash -./scripts/minikube/07-deploy-ingress.sh +./scripts/minikube/08-deploy-ingress.sh ``` This script: @@ -166,10 +184,10 @@ This script: - Configures the ingress controller as LoadBalancer for `minikube tunnel` support - Provides access URLs for the router -### Script 08: Deploy Client (Optional) +### Script 09: Deploy Client (Optional) ```bash -./scripts/minikube/08-deploy-client.sh +./scripts/minikube/09-deploy-client.sh ``` This script: @@ -279,7 +297,7 @@ To create a new environment (e.g., "prod"): export ENVIRONMENT="prod" ``` -2. Run scripts 02-07 again with the new environment: +2. 
Run scripts 02-09 again with the new environment: ```bash ./scripts/minikube/02-setup-apollo-graph.sh @@ -287,8 +305,10 @@ source .env ./scripts/minikube/03-setup-cluster.sh ./scripts/minikube/04-build-images.sh ./scripts/minikube/05-deploy-subgraphs.sh -./scripts/minikube/06-deploy-operator-resources.sh -./scripts/minikube/07-deploy-ingress.sh +./scripts/minikube/06-deploy-coprocessor.sh +./scripts/minikube/07-deploy-operator-resources.sh +./scripts/minikube/08-deploy-ingress.sh +./scripts/minikube/09-deploy-client.sh ``` Each environment will have: diff --git a/scripts/minikube/05-deploy-subgraphs.sh b/scripts/minikube/05-deploy-subgraphs.sh index ca5639f..54f8785 100755 --- a/scripts/minikube/05-deploy-subgraphs.sh +++ b/scripts/minikube/05-deploy-subgraphs.sh @@ -105,5 +105,6 @@ echo "" echo "Monitor subgraph status with:" echo " kubectl get subgraphs --all-namespaces" echo "" -echo "Next step: Run 06-deploy-operator-resources.sh to deploy the router" +echo "Next step: Run 06-deploy-coprocessor.sh to deploy the coprocessor" +echo " Then run 07-deploy-operator-resources.sh to deploy the router" diff --git a/scripts/minikube/06-deploy-coprocessor.sh b/scripts/minikube/06-deploy-coprocessor.sh new file mode 100755 index 0000000..70ad805 --- /dev/null +++ b/scripts/minikube/06-deploy-coprocessor.sh @@ -0,0 +1,92 @@ +#!/bin/bash +set -euo pipefail + +# Script 06: Deploy Coprocessor +# This script deploys the coprocessor using Helm +# The coprocessor is required for JWT authentication and the @authenticated directive + +echo "=== Step 06: Deploying Coprocessor ===" + +# Load environment variables from .env if it exists +if [ -f .env ]; then + echo "Loading environment variables from .env..." + source .env +fi + +# Validate required variables +if [[ -z "${ENVIRONMENT:-}" ]]; then + echo "Error: ENVIRONMENT is required" + echo "Please set ENVIRONMENT in your .env file or export it:" + echo " export ENVIRONMENT=\"dev\"" + exit 1 +fi + +# Check if kubectl is available +if ! command -v kubectl &> /dev/null; then + echo "Error: kubectl is not installed" + exit 1 +fi + +# Check if helm is available +if ! command -v helm &> /dev/null; then + echo "Error: helm is not installed" + exit 1 +fi + +# Verify cluster connection +if ! kubectl cluster-info &> /dev/null; then + echo "Error: Cannot connect to Kubernetes cluster" + exit 1 +fi + +# Ensure apollo namespace exists +kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - + +# Check if coprocessor image exists +echo "Checking if coprocessor image exists..." +if ! docker images | grep -q "coprocessor.*local"; then + echo "Warning: coprocessor:local image not found" + echo "Building coprocessor image..." + eval $(minikube docker-env) + docker build -t "coprocessor:local" "coprocessor" + if [ $? -eq 0 ]; then + echo "✓ Successfully built coprocessor:local" + else + echo "✗ Failed to build coprocessor:local" + exit 1 + fi +fi + +# Deploy coprocessor using Helm +echo "Deploying coprocessor using Helm..." +helm upgrade --install coprocessor deploy/coprocessor \ + --namespace apollo \ + --wait \ + --timeout 5m + +if [ $? -eq 0 ]; then + echo "✓ Coprocessor deployed successfully" +else + echo "✗ Failed to deploy coprocessor" + exit 1 +fi + +# Wait for coprocessor pods to be ready +echo "Waiting for coprocessor pods to be ready..." +kubectl wait --for=condition=ready pod \ + -l app.kubernetes.io/name=coprocessor \ + -n apollo \ + --timeout=300s || true + +# Check coprocessor service +echo "Checking coprocessor service..." 
+kubectl get svc coprocessor -n apollo + +echo "" +echo "✓ Coprocessor deployment complete!" +echo "" +echo "Coprocessor is now available at:" +echo " http://coprocessor.apollo.svc.cluster.local:8081" +echo "" +echo "Next step: Run 07-deploy-operator-resources.sh to deploy the router with coprocessor configuration" + diff --git a/scripts/minikube/06-deploy-operator-resources.sh b/scripts/minikube/07-deploy-operator-resources.sh similarity index 69% rename from scripts/minikube/06-deploy-operator-resources.sh rename to scripts/minikube/07-deploy-operator-resources.sh index 2e67f86..4ca72bf 100755 --- a/scripts/minikube/06-deploy-operator-resources.sh +++ b/scripts/minikube/07-deploy-operator-resources.sh @@ -1,10 +1,11 @@ #!/bin/bash set -euo pipefail -# Script 06: Deploy Operator Resources +# Script 07: Deploy Operator Resources # This script deploys SupergraphSchema and Supergraph CRDs +# Note: The coprocessor (script 06) must be deployed first as the router requires it -echo "=== Step 06: Deploying Operator Resources ===" +echo "=== Step 07: Deploying Operator Resources ===" # Load environment variables from .env if it exists if [ -f .env ]; then @@ -44,6 +45,15 @@ kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - # Resource name based on environment RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" +# Create router configuration ConfigMap +echo "Creating router configuration ConfigMap..." +kubectl create configmap router-config \ + --from-file=router.yaml=deploy/operator-resources/router-config.yaml \ + -n apollo \ + --dry-run=client -o yaml | kubectl apply -f - + +echo "Router configuration ConfigMap created" + # Deploy SupergraphSchema echo "Deploying SupergraphSchema..." cat <> "$ENV_FILE" - echo "# Router URL (generated by 07-deploy-ingress.sh)" >> "$ENV_FILE" + echo "# Router URL (generated by 08-deploy-ingress.sh)" >> "$ENV_FILE" echo "export ROUTER_URL=\"$ROUTER_URL\"" >> "$ENV_FILE" fi @@ -156,6 +156,6 @@ echo "to support minikube tunnel. The router is accessed via the client's nginx echo "" echo "The router URL has been saved to .env and will be used by the client deployment." 
echo "" -echo "Next step: Run 08-deploy-client.sh to deploy the client application (optional)" +echo "Next step: Run 09-deploy-client.sh to deploy the client application (optional)" diff --git a/scripts/minikube/08-deploy-client.sh b/scripts/minikube/09-deploy-client.sh similarity index 96% rename from scripts/minikube/08-deploy-client.sh rename to scripts/minikube/09-deploy-client.sh index 623e7d2..77e09f1 100755 --- a/scripts/minikube/08-deploy-client.sh +++ b/scripts/minikube/09-deploy-client.sh @@ -1,10 +1,10 @@ #!/bin/bash set -euo pipefail -# Script 08: Deploy Client +# Script 09: Deploy Client # This script deploys the client application (optional) -echo "=== Step 08: Deploying Client Application ===" +echo "=== Step 09: Deploying Client Application ===" # Load environment variables from .env if it exists if [ -f .env ]; then @@ -41,7 +41,7 @@ fi # Get router URL from .env file if [[ -z "${ROUTER_URL:-}" ]]; then echo "Error: ROUTER_URL is not set" - echo "Please run 07-deploy-ingress.sh first to set up the router URL" + echo "Please run 08-deploy-ingress.sh first to set up the router URL" exit 1 fi From 415f2da8bbedfcccf778e68be96555f94f217ed5 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Mon, 10 Nov 2025 13:48:06 -0800 Subject: [PATCH 15/31] Enhance router configuration and deployment script: update router-config.yaml to clarify ConfigMap usage and patch router deployment to mount ConfigMap and set configuration arguments. Add checks for existing volumes and arguments to prevent duplication during deployment. --- deploy/operator-resources/router-config.yaml | 11 +- .../minikube/07-deploy-operator-resources.sh | 103 ++++++++++++++++-- 2 files changed, 100 insertions(+), 14 deletions(-) diff --git a/deploy/operator-resources/router-config.yaml b/deploy/operator-resources/router-config.yaml index fa47782..e255184 100644 --- a/deploy/operator-resources/router-config.yaml +++ b/deploy/operator-resources/router-config.yaml @@ -1,6 +1,13 @@ # Router configuration for Apollo Router -# This file is loaded via ConfigMap and referenced in the Supergraph CRD -# Based on: https://github.com/apollosolutions/reference-architecture/blob/main/deploy/router/values.yaml +# +# This configuration is loaded via a ConfigMap and mounted into the router pods. +# The script 07-deploy-operator-resources.sh: +# 1. Creates a ConfigMap from this file +# 2. Patches the router deployment to mount the ConfigMap as a volume at /etc/router +# 3. Patches the router deployment to add --config /etc/router/router.yaml as container args +# +# Note: The Apollo GraphOS Operator CRD does not support router configuration +# in the Supergraph CRD, so we patch the deployment manually after it's created. supergraph: listen: 0.0.0.0:4000 diff --git a/scripts/minikube/07-deploy-operator-resources.sh b/scripts/minikube/07-deploy-operator-resources.sh index 4ca72bf..2c9d4e6 100755 --- a/scripts/minikube/07-deploy-operator-resources.sh +++ b/scripts/minikube/07-deploy-operator-resources.sh @@ -95,18 +95,6 @@ spec: requests: cpu: 100m memory: 256Mi - extraVolumes: - - name: router-config - configMap: - name: router-config - extraVolumeMounts: - - name: router-config - mountPath: /etc/router - readOnly: true - router: - args: - - --config - - /etc/router/router.yaml schema: resource: name: ${RESOURCE_NAME} @@ -115,6 +103,91 @@ EOF echo "Supergraph deployed" +# Wait for router deployment to be created +echo "Waiting for router deployment to be created..." 
+DEPLOYMENT_NAME="${RESOURCE_NAME}" +for i in {1..60}; do + if kubectl get deployment ${DEPLOYMENT_NAME} -n apollo &>/dev/null; then + echo "Router deployment found" + break + fi + echo " Waiting for deployment... ($i/60)" + sleep 2 +done + +if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo &>/dev/null; then + echo "Error: Router deployment not found after waiting" + echo "Please check the Supergraph status:" + echo " kubectl get supergraph ${RESOURCE_NAME} -n apollo" + exit 1 +fi + +# Patch the router deployment to mount the ConfigMap and use it +echo "Patching router deployment to use ConfigMap..." + +# Check if volume already exists, if not add it +if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes[*].name}' | grep -q "router-config"; then + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/volumes/-", + "value": { + "name": "router-config", + "configMap": { + "name": "router-config" + } + } + } + ]' + echo " Added router-config volume" +else + echo " Volume already exists" +fi + +# Check if volumeMount already exists, if not add it +if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[*].name}' | grep -q "router-config"; then + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/volumeMounts/-", + "value": { + "name": "router-config", + "mountPath": "/etc/router", + "readOnly": true + } + } + ]' + echo " Added router-config volumeMount" +else + echo " VolumeMount already exists" +fi + +# Check if --config args already exist, if not add them +CURRENT_ARGS=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args[*]}' || echo "") +if [[ ! "$CURRENT_ARGS" =~ "--config" ]]; then + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--config" + }, + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "/etc/router/router.yaml" + } + ]' + echo " Added --config arguments" +else + echo " --config arguments already exist" +fi + +echo "Router deployment patched" + +# Wait for rollout to complete +echo "Waiting for router rollout to complete..." +kubectl rollout status deployment/${DEPLOYMENT_NAME} -n apollo --timeout=300s || true + # Wait for router to be ready echo "Waiting for router to be ready..." kubectl wait --for=condition=ready --timeout=300s supergraph/${RESOURCE_NAME} -n apollo || true @@ -122,9 +195,15 @@ kubectl wait --for=condition=ready --timeout=300s supergraph/${RESOURCE_NAME} -n echo "" echo "✓ Operator resources deployed!" 
echo "" +echo "Router configuration has been applied via ConfigMap:" +echo " ConfigMap: router-config (contains router.yaml)" +echo " Mounted at: /etc/router/router.yaml" +echo " Router args: --config /etc/router/router.yaml" +echo "" echo "Monitor router status with:" echo " kubectl get supergraphs -n apollo" echo " kubectl get pods -n apollo" +echo " kubectl logs -n apollo deployment/${DEPLOYMENT_NAME}" echo "" echo "Next step: Run 08-deploy-ingress.sh to setup external access" From 38e3b78f1a940ad1fefe5e1d9c4a6afb1156a5b7 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Mon, 10 Nov 2025 13:59:08 -0800 Subject: [PATCH 16/31] Update router configuration and deployment script: change health check port in router-config.yaml, enhance deployment script to replace operator's ConfigMap and volumeMount with custom configurations, and ensure proper handling of existing arguments to avoid duplication during deployment. --- deploy/operator-resources/router-config.yaml | 9 +- .../minikube/07-deploy-operator-resources.sh | 274 ++++++++++++++++-- 2 files changed, 246 insertions(+), 37 deletions(-) diff --git a/deploy/operator-resources/router-config.yaml b/deploy/operator-resources/router-config.yaml index e255184..97c1385 100644 --- a/deploy/operator-resources/router-config.yaml +++ b/deploy/operator-resources/router-config.yaml @@ -46,7 +46,7 @@ coprocessor: headers: true health_check: - listen: 0.0.0.0:8080 + listen: 0.0.0.0:8088 sandbox: enabled: true @@ -54,10 +54,3 @@ sandbox: homepage: enabled: false -include_subgraph_errors: - all: true - -plugins: - experimental: - expose_query_plan: true - diff --git a/scripts/minikube/07-deploy-operator-resources.sh b/scripts/minikube/07-deploy-operator-resources.sh index 2c9d4e6..75b31cb 100755 --- a/scripts/minikube/07-deploy-operator-resources.sh +++ b/scripts/minikube/07-deploy-operator-resources.sh @@ -125,46 +125,264 @@ fi # Patch the router deployment to mount the ConfigMap and use it echo "Patching router deployment to use ConfigMap..." -# Check if volume already exists, if not add it -if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes[*].name}' | grep -q "router-config"; then - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/volumes/-", - "value": { - "name": "router-config", - "configMap": { - "name": "router-config" +# Check if operator's ConfigMap volume exists and replace it with ours +# The operator creates a volume that points to a ConfigMap with name pattern reference-architecture-*-config-* +VOLUMES_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes}' || echo "[]") +VOLUME_INDEX=-1 +INDEX=0 +OPERATOR_CONFIGMAP_FOUND=false +VOLUME_NAME="" + +# Check each volume to see if it points to the operator's ConfigMap +for vol_json in $(echo "$VOLUMES_JSON" | jq -c '.[]'); do + CONFIGMAP_NAME=$(echo "$vol_json" | jq -r '.configMap.name // ""') + if [[ -n "$CONFIGMAP_NAME" && "$CONFIGMAP_NAME" =~ ^reference-architecture.*-config- ]]; then + VOLUME_INDEX=$INDEX + OPERATOR_CONFIGMAP_FOUND=true + VOLUME_NAME=$(echo "$vol_json" | jq -r '.name') + echo " Found operator ConfigMap volume '$VOLUME_NAME' pointing to '$CONFIGMAP_NAME'" + break + fi + INDEX=$((INDEX + 1)) +done + +if [[ "$OPERATOR_CONFIGMAP_FOUND" == "true" ]]; then + echo " Replacing with our router-config ConfigMap..." 
+ + if [[ $VOLUME_INDEX -ge 0 ]]; then + # Replace the operator's ConfigMap volume with ours + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ + { + \"op\": \"replace\", + \"path\": \"/spec/template/spec/volumes/$VOLUME_INDEX\", + \"value\": { + \"name\": \"router-config\", + \"configMap\": { + \"name\": \"router-config\" + } } } + ]" && echo " Replaced operator ConfigMap volume with router-config" || { + echo " Warning: Failed to replace volume, trying add instead..." + # Fallback: add our volume (will have both, but ours will be used if mounted) + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/volumes/-", + "value": { + "name": "router-config", + "configMap": { + "name": "router-config" + } + } + } + ]' } - ]' - echo " Added router-config volume" + else + # Couldn't find index, just add ours + echo " Could not find operator volume index, adding router-config volume..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/volumes/-", + "value": { + "name": "router-config", + "configMap": { + "name": "router-config" + } + } + } + ]' + fi else - echo " Volume already exists" + # No operator volume found, check if our volume exists + if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes[*].name}' | grep -q "router-config"; then + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/volumes/-", + "value": { + "name": "router-config", + "configMap": { + "name": "router-config" + } + } + } + ]' + echo " Added router-config volume" + else + echo " router-config volume already exists" + fi fi # Check if volumeMount already exists, if not add it -if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[*].name}' | grep -q "router-config"; then - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/volumeMounts/-", - "value": { - "name": "router-config", - "mountPath": "/etc/router", - "readOnly": true +# Also check if operator's volumeMount exists and replace it +MOUNTS_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts}' || echo "[]") +MOUNT_INDEX=-1 +INDEX=0 +OPERATOR_MOUNT_FOUND=false + +# Find volumeMount that matches the operator's volume name or has wrong mount path +# Also check if there's a mount at /app (operator's default path) that needs replacing +for mount_json in $(echo "$MOUNTS_JSON" | jq -c '.[]'); do + MOUNT_NAME=$(echo "$mount_json" | jq -r '.name') + MOUNT_PATH=$(echo "$mount_json" | jq -r '.mountPath') + # Check if this mount points to the operator's volume name, or if it's mounted at /app (operator's default) + if [[ -n "$VOLUME_NAME" && "$MOUNT_NAME" == "$VOLUME_NAME" ]] || [[ "$MOUNT_PATH" == "/app" ]]; then + MOUNT_INDEX=$INDEX + OPERATOR_MOUNT_FOUND=true + echo " Found volumeMount '$MOUNT_NAME' at path '$MOUNT_PATH'" + break + fi + INDEX=$((INDEX + 1)) +done + +if [[ "$OPERATOR_MOUNT_FOUND" == "true" ]]; then + echo " Replacing with router-config volumeMount at /etc/router..." 
+ + if [[ $MOUNT_INDEX -ge 0 ]]; then + # Replace the operator's volumeMount with ours + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ + { + \"op\": \"replace\", + \"path\": \"/spec/template/spec/containers/0/volumeMounts/$MOUNT_INDEX\", + \"value\": { + \"name\": \"router-config\", + \"mountPath\": \"/etc/router\", + \"readOnly\": true + } } + ]" && echo " Replaced operator volumeMount with router-config" || { + echo " Warning: Failed to replace volumeMount, trying add instead..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/volumeMounts/-", + "value": { + "name": "router-config", + "mountPath": "/etc/router", + "readOnly": true + } + } + ]' } - ]' - echo " Added router-config volumeMount" + else + # Couldn't find index, just add ours + echo " Could not find operator mount index, adding router-config volumeMount..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/volumeMounts/-", + "value": { + "name": "router-config", + "mountPath": "/etc/router", + "readOnly": true + } + } + ]' + fi else - echo " VolumeMount already exists" + # No operator mount found, check if our mount exists + if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[*].name}' | grep -q "router-config"; then + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/volumeMounts/-", + "value": { + "name": "router-config", + "mountPath": "/etc/router", + "readOnly": true + } + } + ]' + echo " Added router-config volumeMount" + else + echo " router-config volumeMount already exists" + fi fi -# Check if --config args already exist, if not add them +# Check if --config args exist and replace them if needed CURRENT_ARGS=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args[*]}' || echo "") -if [[ ! "$CURRENT_ARGS" =~ "--config" ]]; then +if [[ "$CURRENT_ARGS" =~ "--config" ]]; then + # The operator already set --config, we need to replace it + # Get the full args array as JSON + ARGS_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args}' || echo "[]") + + # Find the index of --config using a simple approach + # Convert JSON array to space-separated and find index + ARGS_LIST=$(echo "$ARGS_JSON" | grep -o '"[^"]*"' | tr -d '"' | tr '\n' ' ') + CONFIG_INDEX=-1 + INDEX=0 + for arg in $ARGS_LIST; do + if [[ "$arg" == "--config" ]]; then + CONFIG_INDEX=$INDEX + break + fi + INDEX=$((INDEX + 1)) + done + + if [[ $CONFIG_INDEX -ge 0 ]]; then + # Replace the --config argument and the following path argument + NEXT_INDEX=$((CONFIG_INDEX + 1)) + echo " Replacing existing --config argument at index $CONFIG_INDEX..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ + { + \"op\": \"replace\", + \"path\": \"/spec/template/spec/containers/0/args/$CONFIG_INDEX\", + \"value\": \"--config\" + }, + { + \"op\": \"replace\", + \"path\": \"/spec/template/spec/containers/0/args/$NEXT_INDEX\", + \"value\": \"/etc/router/router.yaml\" + } + ]" && echo " Successfully replaced --config arguments" || { + echo " Warning: Replace failed, trying remove-then-add approach..." 
+ # Fallback: remove old args, then add new ones + # Remove in reverse order to avoid index shifting + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ + { + \"op\": \"remove\", + \"path\": \"/spec/template/spec/containers/0/args/$NEXT_INDEX\" + }, + { + \"op\": \"remove\", + \"path\": \"/spec/template/spec/containers/0/args/$CONFIG_INDEX\" + } + ]" 2>/dev/null || true + # Add new --config args + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--config" + }, + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "/etc/router/router.yaml" + } + ]' + echo " Added new --config arguments" + } + else + echo " Warning: Could not find --config index, adding new --config arguments..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--config" + }, + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "/etc/router/router.yaml" + } + ]' + fi +else + # No --config exists, add it + echo " Adding --config arguments..." kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ { "op": "add", @@ -178,8 +396,6 @@ if [[ ! "$CURRENT_ARGS" =~ "--config" ]]; then } ]' echo " Added --config arguments" -else - echo " --config arguments already exist" fi echo "Router deployment patched" From 411742823933eb40c3b7109f0c97a81d2eb6a631 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Mon, 10 Nov 2025 14:09:57 -0800 Subject: [PATCH 17/31] Refactor setup documentation and deployment scripts: rename and clarify script functions, update deployment steps for router configuration, and ensure proper sequence for running scripts. Add new scripts for applying router configuration and deploying ingress, while enhancing error handling and user guidance. --- docs/setup.md | 34 +- .../minikube/07-deploy-operator-resources.sh | 295 +------------- scripts/minikube/08-apply-router-config.sh | 365 ++++++++++++++++++ ...deploy-ingress.sh => 09-deploy-ingress.sh} | 8 +- ...9-deploy-client.sh => 10-deploy-client.sh} | 6 +- 5 files changed, 399 insertions(+), 309 deletions(-) create mode 100755 scripts/minikube/08-apply-router-config.sh rename scripts/minikube/{08-deploy-ingress.sh => 09-deploy-ingress.sh} (97%) rename scripts/minikube/{09-deploy-client.sh => 10-deploy-client.sh} (96%) diff --git a/docs/setup.md b/docs/setup.md index 822ac79..4261e33 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -158,12 +158,12 @@ This script: ``` This script: +- Creates the router-config ConfigMap - Deploys SupergraphSchema CRD (triggers composition) - Deploys Supergraph CRD (deploys the Apollo Router) -- Configures the router to use the coprocessor (required) -- Waits for the router to be ready +- Waits for the router deployment to be created -**Note:** The coprocessor (script 06) must be deployed before running this script, as the router configuration requires it. +**Note:** The coprocessor (script 06) must be deployed before running this script. 
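Before running script 07, you can confirm that prerequisite is satisfied — a minimal sketch, assuming the coprocessor chart labels its pods `app.kubernetes.io/name=coprocessor`, as the deploy script expects:

```bash
# Verify the coprocessor pods and service are up before deploying the router
kubectl get pods -n apollo -l app.kubernetes.io/name=coprocessor
kubectl get svc coprocessor -n apollo
```

If the pods are not Ready or the service is missing, rerun `06-deploy-coprocessor.sh` before continuing.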
Monitor router deployment: @@ -173,10 +173,23 @@ kubectl get pods -n apollo kubectl describe supergraph reference-architecture-${ENVIRONMENT} -n apollo ``` -### Script 08: Deploy Ingress +### Script 08: Apply Router Configuration ```bash -./scripts/minikube/08-deploy-ingress.sh +./scripts/minikube/08-apply-router-config.sh +``` + +This script: +- Patches the router deployment to mount the router-config ConfigMap +- Configures the router to use custom settings (coprocessor, CORS, etc.) +- Waits for the router rollout to complete + +**Note:** Script 07 must be run first to create the Supergraph and ConfigMap. + +### Script 09: Deploy Ingress + +```bash +./scripts/minikube/09-deploy-ingress.sh ``` This script: @@ -184,10 +197,10 @@ This script: - Configures the ingress controller as LoadBalancer for `minikube tunnel` support - Provides access URLs for the router -### Script 09: Deploy Client (Optional) +### Script 10: Deploy Client (Optional) ```bash -./scripts/minikube/09-deploy-client.sh +./scripts/minikube/10-deploy-client.sh ``` This script: @@ -297,7 +310,7 @@ To create a new environment (e.g., "prod"): export ENVIRONMENT="prod" ``` -2. Run scripts 02-09 again with the new environment: +2. Run scripts 02-10 again with the new environment: ```bash ./scripts/minikube/02-setup-apollo-graph.sh @@ -307,8 +320,9 @@ source .env ./scripts/minikube/05-deploy-subgraphs.sh ./scripts/minikube/06-deploy-coprocessor.sh ./scripts/minikube/07-deploy-operator-resources.sh -./scripts/minikube/08-deploy-ingress.sh -./scripts/minikube/09-deploy-client.sh +./scripts/minikube/08-apply-router-config.sh +./scripts/minikube/09-deploy-ingress.sh +./scripts/minikube/10-deploy-client.sh ``` Each environment will have: diff --git a/scripts/minikube/07-deploy-operator-resources.sh b/scripts/minikube/07-deploy-operator-resources.sh index 75b31cb..9cb8ffd 100755 --- a/scripts/minikube/07-deploy-operator-resources.sh +++ b/scripts/minikube/07-deploy-operator-resources.sh @@ -122,304 +122,15 @@ if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo &>/dev/null; then exit 1 fi -# Patch the router deployment to mount the ConfigMap and use it -echo "Patching router deployment to use ConfigMap..." - -# Check if operator's ConfigMap volume exists and replace it with ours -# The operator creates a volume that points to a ConfigMap with name pattern reference-architecture-*-config-* -VOLUMES_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes}' || echo "[]") -VOLUME_INDEX=-1 -INDEX=0 -OPERATOR_CONFIGMAP_FOUND=false -VOLUME_NAME="" - -# Check each volume to see if it points to the operator's ConfigMap -for vol_json in $(echo "$VOLUMES_JSON" | jq -c '.[]'); do - CONFIGMAP_NAME=$(echo "$vol_json" | jq -r '.configMap.name // ""') - if [[ -n "$CONFIGMAP_NAME" && "$CONFIGMAP_NAME" =~ ^reference-architecture.*-config- ]]; then - VOLUME_INDEX=$INDEX - OPERATOR_CONFIGMAP_FOUND=true - VOLUME_NAME=$(echo "$vol_json" | jq -r '.name') - echo " Found operator ConfigMap volume '$VOLUME_NAME' pointing to '$CONFIGMAP_NAME'" - break - fi - INDEX=$((INDEX + 1)) -done - -if [[ "$OPERATOR_CONFIGMAP_FOUND" == "true" ]]; then - echo " Replacing with our router-config ConfigMap..." 
- - if [[ $VOLUME_INDEX -ge 0 ]]; then - # Replace the operator's ConfigMap volume with ours - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ - { - \"op\": \"replace\", - \"path\": \"/spec/template/spec/volumes/$VOLUME_INDEX\", - \"value\": { - \"name\": \"router-config\", - \"configMap\": { - \"name\": \"router-config\" - } - } - } - ]" && echo " Replaced operator ConfigMap volume with router-config" || { - echo " Warning: Failed to replace volume, trying add instead..." - # Fallback: add our volume (will have both, but ours will be used if mounted) - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/volumes/-", - "value": { - "name": "router-config", - "configMap": { - "name": "router-config" - } - } - } - ]' - } - else - # Couldn't find index, just add ours - echo " Could not find operator volume index, adding router-config volume..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/volumes/-", - "value": { - "name": "router-config", - "configMap": { - "name": "router-config" - } - } - } - ]' - fi -else - # No operator volume found, check if our volume exists - if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes[*].name}' | grep -q "router-config"; then - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/volumes/-", - "value": { - "name": "router-config", - "configMap": { - "name": "router-config" - } - } - } - ]' - echo " Added router-config volume" - else - echo " router-config volume already exists" - fi -fi - -# Check if volumeMount already exists, if not add it -# Also check if operator's volumeMount exists and replace it -MOUNTS_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts}' || echo "[]") -MOUNT_INDEX=-1 -INDEX=0 -OPERATOR_MOUNT_FOUND=false - -# Find volumeMount that matches the operator's volume name or has wrong mount path -# Also check if there's a mount at /app (operator's default path) that needs replacing -for mount_json in $(echo "$MOUNTS_JSON" | jq -c '.[]'); do - MOUNT_NAME=$(echo "$mount_json" | jq -r '.name') - MOUNT_PATH=$(echo "$mount_json" | jq -r '.mountPath') - # Check if this mount points to the operator's volume name, or if it's mounted at /app (operator's default) - if [[ -n "$VOLUME_NAME" && "$MOUNT_NAME" == "$VOLUME_NAME" ]] || [[ "$MOUNT_PATH" == "/app" ]]; then - MOUNT_INDEX=$INDEX - OPERATOR_MOUNT_FOUND=true - echo " Found volumeMount '$MOUNT_NAME' at path '$MOUNT_PATH'" - break - fi - INDEX=$((INDEX + 1)) -done - -if [[ "$OPERATOR_MOUNT_FOUND" == "true" ]]; then - echo " Replacing with router-config volumeMount at /etc/router..." - - if [[ $MOUNT_INDEX -ge 0 ]]; then - # Replace the operator's volumeMount with ours - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ - { - \"op\": \"replace\", - \"path\": \"/spec/template/spec/containers/0/volumeMounts/$MOUNT_INDEX\", - \"value\": { - \"name\": \"router-config\", - \"mountPath\": \"/etc/router\", - \"readOnly\": true - } - } - ]" && echo " Replaced operator volumeMount with router-config" || { - echo " Warning: Failed to replace volumeMount, trying add instead..." 
- kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/volumeMounts/-", - "value": { - "name": "router-config", - "mountPath": "/etc/router", - "readOnly": true - } - } - ]' - } - else - # Couldn't find index, just add ours - echo " Could not find operator mount index, adding router-config volumeMount..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/volumeMounts/-", - "value": { - "name": "router-config", - "mountPath": "/etc/router", - "readOnly": true - } - } - ]' - fi -else - # No operator mount found, check if our mount exists - if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[*].name}' | grep -q "router-config"; then - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/volumeMounts/-", - "value": { - "name": "router-config", - "mountPath": "/etc/router", - "readOnly": true - } - } - ]' - echo " Added router-config volumeMount" - else - echo " router-config volumeMount already exists" - fi -fi - -# Check if --config args exist and replace them if needed -CURRENT_ARGS=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args[*]}' || echo "") -if [[ "$CURRENT_ARGS" =~ "--config" ]]; then - # The operator already set --config, we need to replace it - # Get the full args array as JSON - ARGS_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args}' || echo "[]") - - # Find the index of --config using a simple approach - # Convert JSON array to space-separated and find index - ARGS_LIST=$(echo "$ARGS_JSON" | grep -o '"[^"]*"' | tr -d '"' | tr '\n' ' ') - CONFIG_INDEX=-1 - INDEX=0 - for arg in $ARGS_LIST; do - if [[ "$arg" == "--config" ]]; then - CONFIG_INDEX=$INDEX - break - fi - INDEX=$((INDEX + 1)) - done - - if [[ $CONFIG_INDEX -ge 0 ]]; then - # Replace the --config argument and the following path argument - NEXT_INDEX=$((CONFIG_INDEX + 1)) - echo " Replacing existing --config argument at index $CONFIG_INDEX..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ - { - \"op\": \"replace\", - \"path\": \"/spec/template/spec/containers/0/args/$CONFIG_INDEX\", - \"value\": \"--config\" - }, - { - \"op\": \"replace\", - \"path\": \"/spec/template/spec/containers/0/args/$NEXT_INDEX\", - \"value\": \"/etc/router/router.yaml\" - } - ]" && echo " Successfully replaced --config arguments" || { - echo " Warning: Replace failed, trying remove-then-add approach..." 
- # Fallback: remove old args, then add new ones - # Remove in reverse order to avoid index shifting - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ - { - \"op\": \"remove\", - \"path\": \"/spec/template/spec/containers/0/args/$NEXT_INDEX\" - }, - { - \"op\": \"remove\", - \"path\": \"/spec/template/spec/containers/0/args/$CONFIG_INDEX\" - } - ]" 2>/dev/null || true - # Add new --config args - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "--config" - }, - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "/etc/router/router.yaml" - } - ]' - echo " Added new --config arguments" - } - else - echo " Warning: Could not find --config index, adding new --config arguments..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "--config" - }, - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "/etc/router/router.yaml" - } - ]' - fi -else - # No --config exists, add it - echo " Adding --config arguments..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "--config" - }, - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "/etc/router/router.yaml" - } - ]' - echo " Added --config arguments" -fi - -echo "Router deployment patched" - -# Wait for rollout to complete -echo "Waiting for router rollout to complete..." -kubectl rollout status deployment/${DEPLOYMENT_NAME} -n apollo --timeout=300s || true - -# Wait for router to be ready -echo "Waiting for router to be ready..." -kubectl wait --for=condition=ready --timeout=300s supergraph/${RESOURCE_NAME} -n apollo || true - echo "" echo "✓ Operator resources deployed!" echo "" -echo "Router configuration has been applied via ConfigMap:" -echo " ConfigMap: router-config (contains router.yaml)" -echo " Mounted at: /etc/router/router.yaml" -echo " Router args: --config /etc/router/router.yaml" +echo "SupergraphSchema and Supergraph CRDs have been created." +echo "The router deployment is being created by the operator." echo "" echo "Monitor router status with:" echo " kubectl get supergraphs -n apollo" echo " kubectl get pods -n apollo" -echo " kubectl logs -n apollo deployment/${DEPLOYMENT_NAME}" echo "" -echo "Next step: Run 08-deploy-ingress.sh to setup external access" +echo "Next step: Run 08-apply-router-config.sh to configure the router with custom settings" diff --git a/scripts/minikube/08-apply-router-config.sh b/scripts/minikube/08-apply-router-config.sh new file mode 100755 index 0000000..dea6863 --- /dev/null +++ b/scripts/minikube/08-apply-router-config.sh @@ -0,0 +1,365 @@ +#!/bin/bash +set -euo pipefail + +# Script 08: Apply Router Configuration +# This script patches the router deployment to use the router-config ConfigMap +# Note: Script 07 must be run first to create the Supergraph and ConfigMap + +echo "=== Step 08: Applying Router Configuration ===" + +# Load environment variables from .env if it exists +if [ -f .env ]; then + echo "Loading environment variables from .env..." 
+ source .env +fi + +# Validate required variables +if [[ -z "${ENVIRONMENT:-}" ]]; then + echo "Error: ENVIRONMENT is required" + echo "Please set ENVIRONMENT in your .env file or export it:" + echo " export ENVIRONMENT=\"dev\"" + exit 1 +fi + +# Check if kubectl is available +if ! command -v kubectl &> /dev/null; then + echo "Error: kubectl is not installed" + exit 1 +fi + +# Verify cluster connection +if ! kubectl cluster-info &> /dev/null; then + echo "Error: Cannot connect to Kubernetes cluster" + exit 1 +fi + +# Resource name based on environment +RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" +DEPLOYMENT_NAME="${RESOURCE_NAME}" + +# Verify ConfigMap exists +if ! kubectl get configmap router-config -n apollo &>/dev/null; then + echo "Error: router-config ConfigMap not found" + echo "Please run 07-deploy-operator-resources.sh first to create the ConfigMap" + exit 1 +fi + +# Wait for router deployment to be created +echo "Waiting for router deployment to be created..." +for i in {1..60}; do + if kubectl get deployment ${DEPLOYMENT_NAME} -n apollo &>/dev/null; then + echo "Router deployment found" + break + fi + echo " Waiting for deployment... ($i/60)" + sleep 2 +done + +if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo &>/dev/null; then + echo "Error: Router deployment not found after waiting" + echo "Please check the Supergraph status:" + echo " kubectl get supergraph ${RESOURCE_NAME} -n apollo" + exit 1 +fi + +# Patch the router deployment to mount the ConfigMap and use it +echo "Patching router deployment to use ConfigMap..." + +# Check if operator's ConfigMap volume exists and replace it with ours +# The operator creates a volume that points to a ConfigMap with name pattern reference-architecture-*-config-* +VOLUMES_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes}' || echo "[]") +VOLUME_INDEX=-1 +INDEX=0 +OPERATOR_CONFIGMAP_FOUND=false +VOLUME_NAME="" + +# Check each volume to see if it points to the operator's ConfigMap +for vol_json in $(echo "$VOLUMES_JSON" | jq -c '.[]'); do + CONFIGMAP_NAME=$(echo "$vol_json" | jq -r '.configMap.name // ""') + if [[ -n "$CONFIGMAP_NAME" && "$CONFIGMAP_NAME" =~ ^reference-architecture.*-config- ]]; then + VOLUME_INDEX=$INDEX + OPERATOR_CONFIGMAP_FOUND=true + VOLUME_NAME=$(echo "$vol_json" | jq -r '.name') + echo " Found operator ConfigMap volume '$VOLUME_NAME' pointing to '$CONFIGMAP_NAME'" + break + fi + INDEX=$((INDEX + 1)) +done + +if [[ "$OPERATOR_CONFIGMAP_FOUND" == "true" ]]; then + echo " Replacing with our router-config ConfigMap..." + + if [[ $VOLUME_INDEX -ge 0 ]]; then + # Replace the operator's ConfigMap volume with ours + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ + { + \"op\": \"replace\", + \"path\": \"/spec/template/spec/volumes/$VOLUME_INDEX\", + \"value\": { + \"name\": \"router-config\", + \"configMap\": { + \"name\": \"router-config\" + } + } + } + ]" && echo " Replaced operator ConfigMap volume with router-config" || { + echo " Warning: Failed to replace volume, trying add instead..." + # Fallback: add our volume (will have both, but ours will be used if mounted) + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/volumes/-", + "value": { + "name": "router-config", + "configMap": { + "name": "router-config" + } + } + } + ]' + } + else + # Couldn't find index, just add ours + echo " Could not find operator volume index, adding router-config volume..." 
+ kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/volumes/-", + "value": { + "name": "router-config", + "configMap": { + "name": "router-config" + } + } + } + ]' + fi +else + # No operator volume found, check if our volume exists + if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes[*].name}' | grep -q "router-config"; then + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/volumes/-", + "value": { + "name": "router-config", + "configMap": { + "name": "router-config" + } + } + } + ]' + echo " Added router-config volume" + else + echo " router-config volume already exists" + fi +fi + +# Check if volumeMount already exists, if not add it +# Also check if operator's volumeMount exists and replace it +MOUNTS_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts}' || echo "[]") +MOUNT_INDEX=-1 +INDEX=0 +OPERATOR_MOUNT_FOUND=false + +# Find volumeMount that matches the operator's volume name or has wrong mount path +# Also check if there's a mount at /app (operator's default path) that needs replacing +for mount_json in $(echo "$MOUNTS_JSON" | jq -c '.[]'); do + MOUNT_NAME=$(echo "$mount_json" | jq -r '.name') + MOUNT_PATH=$(echo "$mount_json" | jq -r '.mountPath') + # Check if this mount points to the operator's volume name, or if it's mounted at /app (operator's default) + if [[ -n "$VOLUME_NAME" && "$MOUNT_NAME" == "$VOLUME_NAME" ]] || [[ "$MOUNT_PATH" == "/app" ]]; then + MOUNT_INDEX=$INDEX + OPERATOR_MOUNT_FOUND=true + echo " Found volumeMount '$MOUNT_NAME' at path '$MOUNT_PATH'" + break + fi + INDEX=$((INDEX + 1)) +done + +if [[ "$OPERATOR_MOUNT_FOUND" == "true" ]]; then + echo " Replacing with router-config volumeMount at /etc/router..." + + if [[ $MOUNT_INDEX -ge 0 ]]; then + # Replace the operator's volumeMount with ours + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ + { + \"op\": \"replace\", + \"path\": \"/spec/template/spec/containers/0/volumeMounts/$MOUNT_INDEX\", + \"value\": { + \"name\": \"router-config\", + \"mountPath\": \"/etc/router\", + \"readOnly\": true + } + } + ]" && echo " Replaced operator volumeMount with router-config" || { + echo " Warning: Failed to replace volumeMount, trying add instead..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/volumeMounts/-", + "value": { + "name": "router-config", + "mountPath": "/etc/router", + "readOnly": true + } + } + ]' + } + else + # Couldn't find index, just add ours + echo " Could not find operator mount index, adding router-config volumeMount..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/volumeMounts/-", + "value": { + "name": "router-config", + "mountPath": "/etc/router", + "readOnly": true + } + } + ]' + fi +else + # No operator mount found, check if our mount exists + if ! 
kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[*].name}' | grep -q "router-config"; then + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/volumeMounts/-", + "value": { + "name": "router-config", + "mountPath": "/etc/router", + "readOnly": true + } + } + ]' + echo " Added router-config volumeMount" + else + echo " router-config volumeMount already exists" + fi +fi + +# Check if --config args exist and replace them if needed +CURRENT_ARGS=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args[*]}' || echo "") +if [[ "$CURRENT_ARGS" =~ "--config" ]]; then + # The operator already set --config, we need to replace it + # Get the full args array as JSON + ARGS_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args}' || echo "[]") + + # Find the index of --config using a simple approach + # Convert JSON array to space-separated and find index + ARGS_LIST=$(echo "$ARGS_JSON" | grep -o '"[^"]*"' | tr -d '"' | tr '\n' ' ') + CONFIG_INDEX=-1 + INDEX=0 + for arg in $ARGS_LIST; do + if [[ "$arg" == "--config" ]]; then + CONFIG_INDEX=$INDEX + break + fi + INDEX=$((INDEX + 1)) + done + + if [[ $CONFIG_INDEX -ge 0 ]]; then + # Replace the --config argument and the following path argument + NEXT_INDEX=$((CONFIG_INDEX + 1)) + echo " Replacing existing --config argument at index $CONFIG_INDEX..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ + { + \"op\": \"replace\", + \"path\": \"/spec/template/spec/containers/0/args/$CONFIG_INDEX\", + \"value\": \"--config\" + }, + { + \"op\": \"replace\", + \"path\": \"/spec/template/spec/containers/0/args/$NEXT_INDEX\", + \"value\": \"/etc/router/router.yaml\" + } + ]" && echo " Successfully replaced --config arguments" || { + echo " Warning: Replace failed, trying remove-then-add approach..." + # Fallback: remove old args, then add new ones + # Remove in reverse order to avoid index shifting + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ + { + \"op\": \"remove\", + \"path\": \"/spec/template/spec/containers/0/args/$NEXT_INDEX\" + }, + { + \"op\": \"remove\", + \"path\": \"/spec/template/spec/containers/0/args/$CONFIG_INDEX\" + } + ]" 2>/dev/null || true + # Add new --config args + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--config" + }, + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "/etc/router/router.yaml" + } + ]' + echo " Added new --config arguments" + } + else + echo " Warning: Could not find --config index, adding new --config arguments..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--config" + }, + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "/etc/router/router.yaml" + } + ]' + fi +else + # No --config exists, add it + echo " Adding --config arguments..." 
+ kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--config" + }, + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "/etc/router/router.yaml" + } + ]' + echo " Added --config arguments" +fi + +echo "Router deployment patched" + +# Wait for rollout to complete +echo "Waiting for router rollout to complete..." +kubectl rollout status deployment/${DEPLOYMENT_NAME} -n apollo --timeout=300s || true + +# Wait for router to be ready +echo "Waiting for router to be ready..." +kubectl wait --for=condition=ready --timeout=300s supergraph/${RESOURCE_NAME} -n apollo || true + +echo "" +echo "✓ Router configuration applied!" +echo "" +echo "Router configuration has been applied via ConfigMap:" +echo " ConfigMap: router-config (contains router.yaml)" +echo " Mounted at: /etc/router/router.yaml" +echo " Router args: --config /etc/router/router.yaml" +echo "" +echo "Monitor router status with:" +echo " kubectl get supergraphs -n apollo" +echo " kubectl get pods -n apollo" +echo " kubectl logs -n apollo deployment/${DEPLOYMENT_NAME}" +echo "" +echo "Next step: Run 09-deploy-ingress.sh to setup external access" + diff --git a/scripts/minikube/08-deploy-ingress.sh b/scripts/minikube/09-deploy-ingress.sh similarity index 97% rename from scripts/minikube/08-deploy-ingress.sh rename to scripts/minikube/09-deploy-ingress.sh index 04d71d8..0be2333 100755 --- a/scripts/minikube/08-deploy-ingress.sh +++ b/scripts/minikube/09-deploy-ingress.sh @@ -1,10 +1,10 @@ #!/bin/bash set -euo pipefail -# Script 08: Deploy Ingress +# Script 09: Deploy Ingress # This script sets up ingress for external access to the router -echo "=== Step 08: Deploying Ingress ===" +echo "=== Step 09: Deploying Ingress ===" # Load environment variables from .env if it exists if [ -f .env ]; then @@ -100,7 +100,7 @@ if grep -q "^export ROUTER_URL=" "$ENV_FILE"; then fi else echo "" >> "$ENV_FILE" - echo "# Router URL (generated by 08-deploy-ingress.sh)" >> "$ENV_FILE" + echo "# Router URL (generated by 09-deploy-ingress.sh)" >> "$ENV_FILE" echo "export ROUTER_URL=\"$ROUTER_URL\"" >> "$ENV_FILE" fi @@ -156,6 +156,6 @@ echo "to support minikube tunnel. The router is accessed via the client's nginx echo "" echo "The router URL has been saved to .env and will be used by the client deployment." 
echo "" -echo "Next step: Run 09-deploy-client.sh to deploy the client application (optional)" +echo "Next step: Run 10-deploy-client.sh to deploy the client application (optional)" diff --git a/scripts/minikube/09-deploy-client.sh b/scripts/minikube/10-deploy-client.sh similarity index 96% rename from scripts/minikube/09-deploy-client.sh rename to scripts/minikube/10-deploy-client.sh index 77e09f1..e7025f3 100755 --- a/scripts/minikube/09-deploy-client.sh +++ b/scripts/minikube/10-deploy-client.sh @@ -1,10 +1,10 @@ #!/bin/bash set -euo pipefail -# Script 09: Deploy Client +# Script 10: Deploy Client # This script deploys the client application (optional) -echo "=== Step 09: Deploying Client Application ===" +echo "=== Step 10: Deploying Client Application ===" # Load environment variables from .env if it exists if [ -f .env ]; then @@ -41,7 +41,7 @@ fi # Get router URL from .env file if [[ -z "${ROUTER_URL:-}" ]]; then echo "Error: ROUTER_URL is not set" - echo "Please run 08-deploy-ingress.sh first to set up the router URL" + echo "Please run 09-deploy-ingress.sh first to set up the router URL" exit 1 fi From bdda978d2476b2f0303407ad200ffd41b8d584df Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Mon, 10 Nov 2025 14:11:14 -0800 Subject: [PATCH 18/31] Update user profile queries to use 'me' instead of 'user' for consistency across components. Adjust cart item count retrieval in LoggedInMenu and cart data access in Cart component accordingly. --- client/src/apollo/queries.ts | 2 +- client/src/components/Header/LoggedInMenu.tsx | 2 +- client/src/routes/cart/Cart.tsx | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/client/src/apollo/queries.ts b/client/src/apollo/queries.ts index 64285c6..37fd4ea 100644 --- a/client/src/apollo/queries.ts +++ b/client/src/apollo/queries.ts @@ -40,7 +40,7 @@ const USER_PROFILE = gql` ` const USER_PROFILE_FULL = gql` query UserProfileFull { - user { + me { id cart { userId diff --git a/client/src/components/Header/LoggedInMenu.tsx b/client/src/components/Header/LoggedInMenu.tsx index daec08b..a6f3b83 100644 --- a/client/src/components/Header/LoggedInMenu.tsx +++ b/client/src/components/Header/LoggedInMenu.tsx @@ -53,7 +53,7 @@ export default function LoggedInMenu() { }, }) - const cartItemCount = data?.user?.cart?.items?.length || 0 + const cartItemCount = data?.me?.cart?.items?.length || 0 return ( diff --git a/client/src/routes/cart/Cart.tsx b/client/src/routes/cart/Cart.tsx index 6ffdb4e..7d38a48 100644 --- a/client/src/routes/cart/Cart.tsx +++ b/client/src/routes/cart/Cart.tsx @@ -65,7 +65,7 @@ export default function Cart(props: Props) { ) } - const cart = data?.user?.cart + const cart = data?.me?.cart const items = cart?.items || [] const subtotal = cart?.subtotal || 0 From da1e8765ad1ec021e0606e3cbbe51d649585a35f Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Mon, 10 Nov 2025 15:26:31 -0800 Subject: [PATCH 19/31] Refactor login mutation to handle optional scopes and improve error handling. Update LoginForm component to remove unused scopes state and adjust placeholder texts for better user guidance. Enhance coprocessor request handling by removing JWT validation at the RouterRequest stage and adding detailed logging for router lifecycle stages with new Rhai scripts. 
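
A quick way to exercise the now-optional scopes path end to end is a plain GraphQL login request against the router once ingress is up. This is an illustrative smoke test, not part of the change set: it assumes ROUTER_URL (written to .env by the ingress script) points at the router's GraphQL endpoint, and it uses user1 from the login form's placeholder text; per the resolver, any non-empty password is accepted for a known username.

```bash
# Hedged smoke test: login without a scopes argument (scopes is now optional).
# Assumes .env exports ROUTER_URL and that jq is installed; "demo" is an arbitrary non-empty password.
source .env
curl -s "${ROUTER_URL}" \
  -H 'content-type: application/json' \
  --data '{"query":"mutation Login($u: String!, $p: String!) { login(username: $u, password: $p) { __typename ... on LoginSuccessful { token } } }","variables":{"u":"user1","p":"demo"}}' \
  | jq .
```

A LoginSuccessful payload with a signed token confirms the resolver's empty-scopes fallback works, and the Rhai scripts added in this patch log the request at each router lifecycle stage, which helps trace this call through the router.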
--- client/src/apollo/queries.ts | 2 +- client/src/components/LoginForm/LoginForm.tsx | 21 +- coprocessor/src/index.ts | 29 +- deploy/operator-resources/rhai/main.rhai | 333 ++++++++++++++++++ deploy/operator-resources/router-config.yaml | 4 + .../minikube/07-deploy-operator-resources.sh | 9 + scripts/minikube/08-apply-router-config.sh | 97 ++++- subgraphs/users/src/resolvers/index.ts | 44 ++- 8 files changed, 473 insertions(+), 66 deletions(-) create mode 100644 deploy/operator-resources/rhai/main.rhai diff --git a/client/src/apollo/queries.ts b/client/src/apollo/queries.ts index 37fd4ea..3d5fb49 100644 --- a/client/src/apollo/queries.ts +++ b/client/src/apollo/queries.ts @@ -128,7 +128,7 @@ const LOGIN = gql` mutation Mutation( $username: String! $password: String! - $scopes: [String!]! + $scopes: [String!] ) { login(username: $username, password: $password, scopes: $scopes) { ... on LoginSuccessful { diff --git a/client/src/components/LoginForm/LoginForm.tsx b/client/src/components/LoginForm/LoginForm.tsx index 9438546..220c95b 100644 --- a/client/src/components/LoginForm/LoginForm.tsx +++ b/client/src/components/LoginForm/LoginForm.tsx @@ -28,7 +28,6 @@ const LoginForm = () => { const navigate = useNavigate() const [username, setUsername] = useState('') const [password, setPassword] = useState('') - const [scopes, setScopes] = useState('') const [loginError, setLoginError] = useState( undefined as ApolloError | undefined ) @@ -36,7 +35,6 @@ const LoginForm = () => { const resetForm = () => { setUsername('') setPassword('') - setScopes('') } const handleChangeUsername = (e: React.FormEvent) => { @@ -47,17 +45,12 @@ const LoginForm = () => { setPassword(e?.currentTarget.value) } - const handleChangeScopes = (e: React.FormEvent) => { - setScopes(e?.currentTarget.value) - } - const [loginMutation, { data, loading, error: requestError }] = useMutation( MUTATIONS.LOGIN, { variables: { username: username, password: password, - scopes: scopes.split(','), }, onCompleted: async (data) => { // Request succeeded but login failed @@ -102,7 +95,7 @@ const LoginForm = () => { bg="navy.400" borderWidth="2px" borderColor="beige.400" - placeholder="Type your username" + placeholder="user1, user2, or user3" value={username} onChange={handleChangeUsername} /> @@ -112,21 +105,11 @@ const LoginForm = () => { - - Scopes - - { /** * Handles a coprocessor request - * Validates JWT authentication for RouterRequest stage * Adds a "source" header to SubgraphRequest stage * + * Note: JWT authentication is handled by the router's built-in authentication plugin. + * The router automatically validates JWTs and enforces the @authenticated directive. + * The coprocessor should not block requests at RouterRequest stage. 
+ * * @param req - The request object * @param res - The response object */ @@ -72,30 +75,6 @@ async function handleCoprocessorRequest( ): Promise { const payload = req.body; - // Handle RouterRequest stage - validate authentication - if (payload.stage === CoprocessorStage.ROUTER_REQUEST) { - const authHeader = payload.headers.authorization?.[0]; - const isValid = await validateToken(authHeader); - - if (!isValid) { - // Return 401 Unauthorized if token is invalid or missing - res.json({ - ...payload, - control: { - break: 401, - }, - }); - return; - } - - // Token is valid, continue with the request - res.json({ - ...payload, - control: "continue", - }); - return; - } - // Handle SubgraphRequest stage - add source header if (payload.stage === CoprocessorStage.SUBGRAPH_REQUEST) { payload.headers["source"] = ["coprocessor"]; diff --git a/deploy/operator-resources/rhai/main.rhai b/deploy/operator-resources/rhai/main.rhai new file mode 100644 index 0000000..403cbd4 --- /dev/null +++ b/deploy/operator-resources/rhai/main.rhai @@ -0,0 +1,333 @@ +// Rhai script for logging at all router lifecycle stages +// This script logs requests and responses at every stage to help debug request flow + +fn router_service(service) { + // Router Service: Beginning and end of HTTP request lifecycle + let request_callback = |request| { + log_info("=== Router Service: Request ==="); + log_info(`HTTP Method: ${request.method}`); + log_info(`HTTP Path: ${request.uri.path}`); + log_info(`Request ID: ${request.id}`); + + // Log headers + try { + log_debug("Request Headers:"); + for key in request.headers.keys() { + log_debug(` ${key}: ${request.headers[key]}`); + } + } catch(err) { + log_debug(`Could not log headers: ${err}`); + } + + // Try to get trace ID + try { + let trace_id = traceid(); + log_info(`Trace ID: ${trace_id}`); + } catch(err) { + log_debug(`Trace ID not available: ${err}`); + } + }; + + let response_callback = |response| { + log_info("=== Router Service: Response ==="); + + // Check if this is the primary response (status_code only available on primary) + if response.is_primary() { + log_info(`HTTP Status: ${response.status_code.to_string()}`); + + // Log response headers + try { + log_debug("Response Headers:"); + for key in response.headers.keys() { + log_debug(` ${key}: ${response.headers[key]}`); + } + } catch(err) { + log_debug(`Could not log headers: ${err}`); + } + } else { + log_debug("Non-primary response (deferred stream)"); + } + + log_info(`Response ID: ${response.id}`); + }; + + service.map_request(request_callback); + service.map_response(response_callback); +} + +fn supergraph_service(service) { + // Supergraph Service: Beginning and end of GraphQL request lifecycle + let request_callback = |request| { + log_info("=== Supergraph Service: Request ==="); + + // Log GraphQL operation details + try { + if request.body != () { + try { + if request.body.query != () { + log_info(`GraphQL Query: ${request.body.query}`); + } + } catch(err) { + log_debug(`Query not available: ${err}`); + } + try { + if request.body.operation_name != () { + log_info(`Operation Name: ${request.body.operation_name}`); + } + } catch(err) { + log_debug(`Operation name not available: ${err}`); + } + try { + if request.body.variables != () { + log_debug(`Variables: ${request.body.variables}`); + } + } catch(err) { + log_debug(`Variables not available: ${err}`); + } + } + } catch(err) { + log_debug(`Body not available: ${err}`); + } + + // Log context + try { + if request.context != () { + log_debug("Request Context 
available"); + } + } catch(err) { + log_debug(`Context not available: ${err}`); + } + + // Try to get trace ID + try { + let trace_id = traceid(); + log_info(`Trace ID: ${trace_id}`); + } catch(err) { + log_debug(`Trace ID not available: ${err}`); + } + }; + + let response_callback = |response| { + log_info("=== Supergraph Service: Response ==="); + + // Log GraphQL response details + try { + if response.body != () { + try { + if response.body.data != () { + log_debug("Response contains data"); + } + } catch(err) { + log_debug(`Data not available: ${err}`); + } + try { + if response.body.errors != () { + log_warn(`GraphQL Errors: ${response.body.errors}`); + } + } catch(err) { + log_debug(`Errors not available: ${err}`); + } + try { + if response.body.extensions != () { + log_debug(`Extensions: ${response.body.extensions}`); + } + } catch(err) { + log_debug(`Extensions not available: ${err}`); + } + } + } catch(err) { + log_debug(`Body not available: ${err}`); + } + + // Note: status_code is not available on SupergraphService responses + // HTTP status is only available on RouterService responses + }; + + service.map_request(request_callback); + service.map_response(response_callback); +} + +fn execution_service(service) { + // Execution Service: Query plan execution + let request_callback = |request| { + log_info("=== Execution Service: Request ==="); + log_info("Query plan execution initiated"); + + // Log query plan details if available + try { + if request.query_plan != () { + log_debug("Query plan available"); + } + } catch(err) { + log_debug(`Query plan not available: ${err}`); + } + + // Log operation details + try { + if request.body != () { + try { + if request.body.query != () { + log_debug(`Executing query: ${request.body.query}`); + } + } catch(err) { + log_debug(`Query not available: ${err}`); + } + try { + if request.body.operation_name != () { + log_info(`Operation: ${request.body.operation_name}`); + } + } catch(err) { + log_debug(`Operation name not available: ${err}`); + } + } + } catch(err) { + log_debug(`Body not available: ${err}`); + } + + // Try to get trace ID + try { + let trace_id = traceid(); + log_info(`Trace ID: ${trace_id}`); + } catch(err) { + log_debug(`Trace ID not available: ${err}`); + } + }; + + let response_callback = |response| { + log_info("=== Execution Service: Response ==="); + log_info("Query plan execution completed"); + + // Log execution results + try { + if response.body != () { + try { + if response.body.data != () { + log_debug("Execution returned data"); + } + } catch(err) { + log_debug(`Data not available: ${err}`); + } + try { + if response.body.errors != () { + log_warn(`Execution errors: ${response.body.errors}`); + } + } catch(err) { + log_debug(`Errors not available: ${err}`); + } + } + } catch(err) { + log_debug(`Body not available: ${err}`); + } + }; + + service.map_request(request_callback); + service.map_response(response_callback); +} + +fn subgraph_service(service, subgraph) { + // Subgraph Service: Communication with individual subgraphs + let request_callback = |request| { + log_info(`=== Subgraph Service: Request to ${subgraph} ===`); + log_info(`Subgraph: ${subgraph}`); + + // Log subgraph request details + // For subgraph_service, request.subgraph.* contains the subgraph request details + log_info(`Subgraph URI Scheme: ${request.subgraph.uri.scheme}`); + log_info(`Subgraph URI Host: ${request.subgraph.uri.host}`); + if request.subgraph.uri.port != () { + log_info(`Subgraph URI Port: ${request.subgraph.uri.port}`); + } + 
log_info(`Subgraph URI Path: ${request.subgraph.uri.path}`); + + // Log subgraph operation details + try { + if request.subgraph.body != () { + try { + if request.subgraph.body.query != () { + log_debug(`Subgraph Query: ${request.subgraph.body.query}`); + } + } catch(err) { + log_debug(`Subgraph query not available: ${err}`); + } + try { + if request.subgraph.body.operation_name != () { + log_debug(`Subgraph Operation: ${request.subgraph.body.operation_name}`); + } + } catch(err) { + log_debug(`Subgraph operation name not available: ${err}`); + } + try { + if request.subgraph.body.variables != () { + log_debug(`Subgraph Variables: ${request.subgraph.body.variables}`); + } + } catch(err) { + log_debug(`Subgraph variables not available: ${err}`); + } + } + } catch(err) { + log_debug(`Subgraph body not available: ${err}`); + } + + // Log headers being sent to subgraph + try { + log_debug(`Headers sent to ${subgraph}:`); + for key in request.subgraph.headers.keys() { + log_debug(` ${key}: ${request.subgraph.headers[key]}`); + } + } catch(err) { + log_debug(`Could not log subgraph request headers: ${err}`); + } + + // Try to get trace ID + try { + let trace_id = traceid(); + log_info(`Trace ID: ${trace_id}`); + } catch(err) { + log_debug(`Trace ID not available: ${err}`); + } + }; + + let response_callback = |response| { + log_info(`=== Subgraph Service: Response from ${subgraph} ===`); + log_info(`Subgraph: ${subgraph}`); + + // Log subgraph response status + log_info(`HTTP Status: ${response.status_code.to_string()}`); + + // Log subgraph response details + try { + if response.body != () { + try { + if response.body.data != () { + log_debug(`Response from ${subgraph} contains data`); + } + } catch(err) { + log_debug(`Data not available: ${err}`); + } + try { + if response.body.errors != () { + log_warn(`Errors from ${subgraph}: ${response.body.errors}`); + } + } catch(err) { + log_debug(`Errors not available: ${err}`); + } + } + } catch(err) { + log_debug(`Body not available: ${err}`); + } + + // Log headers received from subgraph + try { + log_debug(`Headers received from ${subgraph}:`); + for key in response.headers.keys() { + log_debug(` ${key}: ${response.headers[key]}`); + } + } catch(err) { + log_debug(`Could not log subgraph response headers: ${err}`); + } + }; + + service.map_request(request_callback); + service.map_response(response_callback); +} + diff --git a/deploy/operator-resources/router-config.yaml b/deploy/operator-resources/router-config.yaml index 97c1385..2fccd5b 100644 --- a/deploy/operator-resources/router-config.yaml +++ b/deploy/operator-resources/router-config.yaml @@ -54,3 +54,7 @@ sandbox: homepage: enabled: false +rhai: + scripts: "/etc/rhai" + main: "main.rhai" + diff --git a/scripts/minikube/07-deploy-operator-resources.sh b/scripts/minikube/07-deploy-operator-resources.sh index 9cb8ffd..0b7f8e4 100755 --- a/scripts/minikube/07-deploy-operator-resources.sh +++ b/scripts/minikube/07-deploy-operator-resources.sh @@ -54,6 +54,15 @@ kubectl create configmap router-config \ echo "Router configuration ConfigMap created" +# Create Rhai scripts ConfigMap +echo "Creating Rhai scripts ConfigMap..." +kubectl create configmap rhai-scripts \ + --from-file=main.rhai=deploy/operator-resources/rhai/main.rhai \ + -n apollo \ + --dry-run=client -o yaml | kubectl apply -f - + +echo "Rhai scripts ConfigMap created" + # Deploy SupergraphSchema echo "Deploying SupergraphSchema..." 
cat </dev/null; then echo "Error: router-config ConfigMap not found" echo "Please run 07-deploy-operator-resources.sh first to create the ConfigMap" exit 1 fi +if ! kubectl get configmap rhai-scripts -n apollo &>/dev/null; then + echo "Error: rhai-scripts ConfigMap not found" + echo "Please run 07-deploy-operator-resources.sh first to create the ConfigMap" + exit 1 +fi + # Wait for router deployment to be created echo "Waiting for router deployment to be created..." for i in {1..60}; do @@ -241,6 +247,48 @@ else fi fi +# Add Rhai scripts volume and volumeMount +echo "Adding Rhai scripts volume and volumeMount..." + +# Check if rhai-scripts volume exists +if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes[*].name}' | grep -q "rhai-scripts"; then + echo " Adding rhai-scripts volume..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/volumes/-", + "value": { + "name": "rhai-scripts", + "configMap": { + "name": "rhai-scripts" + } + } + } + ]' + echo " Added rhai-scripts volume" +else + echo " rhai-scripts volume already exists" +fi + +# Check if rhai-scripts volumeMount exists +if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[*].name}' | grep -q "rhai-scripts"; then + echo " Adding rhai-scripts volumeMount..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/volumeMounts/-", + "value": { + "name": "rhai-scripts", + "mountPath": "/etc/rhai", + "readOnly": true + } + } + ]' + echo " Added rhai-scripts volumeMount" +else + echo " rhai-scripts volumeMount already exists" +fi + # Check if --config args exist and replace them if needed CURRENT_ARGS=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args[*]}' || echo "") if [[ "$CURRENT_ARGS" =~ "--config" ]]; then @@ -338,6 +386,46 @@ else echo " Added --config arguments" fi +# Add --log=debug argument if it doesn't exist +echo "Checking for --log argument..." +CURRENT_ARGS=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args[*]}' || echo "") +if [[ ! "$CURRENT_ARGS" =~ "--log" ]]; then + echo " Adding --log=debug argument..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--log=debug" + } + ]' + echo " Added --log=debug argument" +else + echo " --log argument already exists, checking if it's set to debug..." + ARGS_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args}' || echo "[]") + ARGS_LIST=$(echo "$ARGS_JSON" | grep -o '"[^"]*"' | tr -d '"' | tr '\n' ' ') + LOG_INDEX=-1 + INDEX=0 + for arg in $ARGS_LIST; do + if [[ "$arg" =~ "--log" ]]; then + LOG_INDEX=$INDEX + break + fi + INDEX=$((INDEX + 1)) + done + + if [[ $LOG_INDEX -ge 0 ]]; then + echo " Replacing existing --log argument with --log=debug..." 
+ kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ + { + \"op\": \"replace\", + \"path\": \"/spec/template/spec/containers/0/args/$LOG_INDEX\", + \"value\": \"--log=debug\" + } + ]" + echo " Updated --log argument to debug" + fi +fi + echo "Router deployment patched" # Wait for rollout to complete @@ -354,7 +442,12 @@ echo "" echo "Router configuration has been applied via ConfigMap:" echo " ConfigMap: router-config (contains router.yaml)" echo " Mounted at: /etc/router/router.yaml" -echo " Router args: --config /etc/router/router.yaml" +echo " Router args: --config /etc/router/router.yaml --log=debug" +echo "" +echo "Rhai scripts have been mounted:" +echo " ConfigMap: rhai-scripts (contains main.rhai)" +echo " Mounted at: /etc/rhai" +echo " Scripts will log at all router lifecycle stages" echo "" echo "Monitor router status with:" echo " kubectl get supergraphs -n apollo" diff --git a/subgraphs/users/src/resolvers/index.ts b/subgraphs/users/src/resolvers/index.ts index 3fdcd3d..1f317b8 100644 --- a/subgraphs/users/src/resolvers/index.ts +++ b/subgraphs/users/src/resolvers/index.ts @@ -49,28 +49,34 @@ export const resolvers: Resolvers = { }, Mutation: { async login(_, { username, password, scopes }) { - let user = getUserbyUsername(username) - if (!user || password === "") { - return { - reason: "user not found" + try { + let user = getUserbyUsername(username) + if (!user || password === "") { + return { + reason: "user not found" + } } - } - const privateKeyText = await readFile("./keys/private_key.pem", { - encoding: "utf8" - }); + const privateKeyText = await readFile("./keys/private_key.pem", { + encoding: "utf8" + }); - const alg = "ES256"; - const privateKey = createPrivateKey(privateKeyText); - const token = await new jose.SignJWT({ - sub: user.id, - scope: scopes.join(' '), - username, - }).setProtectedHeader({ alg }).setIssuedAt().setExpirationTime('2h').sign(privateKey); + const alg = "ES256"; + const privateKey = createPrivateKey(privateKeyText); + const scopesArray = (scopes && Array.isArray(scopes)) ? scopes : []; + const token = await new jose.SignJWT({ + sub: user.id, + scope: scopesArray.join(' '), + username, + }).setProtectedHeader({ alg }).setIssuedAt().setExpirationTime('2h').sign(privateKey); - return { - token, - user, - scopes + return { + token, + user, + scopes: scopesArray + } + } catch (error) { + console.error("Login error:", error); + throw new GraphQLError(`Login failed: ${error instanceof Error ? error.message : String(error)}`); } } }, From 2c43ec42e500273ef5dada9aacc3e6cd5e6921c9 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Mon, 10 Nov 2025 15:28:43 -0800 Subject: [PATCH 20/31] Update Router Configuration Migration Guide: Expand documentation to include a comprehensive TODO list and current implementation status. Detail completed tasks, hybrid configuration methods, and known limitations of the Apollo GraphOS Operator. Enhance clarity on maintenance tasks and future improvements for router configuration management. 
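
For anyone applying the hybrid setup documented below, a short verification pass after running 08-apply-router-config.sh confirms the manual patching actually landed. This is an illustrative, read-only sketch, assuming ENVIRONMENT is exported as in the other scripts.

```bash
# Verify the patched router deployment: args should include --config and --log=debug,
# and volumeMounts should include router-config at /etc/router and rhai-scripts at /etc/rhai.
DEPLOY="reference-architecture-${ENVIRONMENT}"
kubectl get deployment "${DEPLOY}" -n apollo \
  -o jsonpath='{.spec.template.spec.containers[0].args}{"\n"}'
kubectl get deployment "${DEPLOY}" -n apollo \
  -o jsonpath='{range .spec.template.spec.containers[0].volumeMounts[*]}{.name}{" -> "}{.mountPath}{"\n"}{end}'
```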
--- .../ROUTER_CONFIG_MIGRATION.md | 319 ++++++++++-------- 1 file changed, 171 insertions(+), 148 deletions(-) diff --git a/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md b/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md index 1eb962c..a3b4477 100644 --- a/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md +++ b/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md @@ -1,188 +1,211 @@ -# Router Configuration Migration Guide - -This document describes how the router configuration from `deploy/router/values.yaml` was migrated to operator-managed Supergraph CRDs. - -## Migration Summary - -All router configuration has been moved from Helm values (`deploy/router/values.yaml`) into the Supergraph CRD specifications: -- `deploy/operator-resources/supergraph-dev.yaml` (dev environment) -- `deploy/operator-resources/supergraph-prod.yaml` (prod environment) - -## Configuration Mapping - -### Core Router Settings (Both Dev and Prod) - -| Previous Location | New Location | Value | -|-------------------|--------------|-------| -| `router.configuration.health_check` | `spec.podTemplate.router.configuration.health_check` | `listen: 0.0.0.0:8080` | -| `router.configuration.sandbox` | `spec.podTemplate.router.configuration.sandbox` | `enabled: true` | -| `router.configuration.homepage` | `spec.podTemplate.router.configuration.homepage` | `enabled: false` | -| `router.configuration.supergraph` | `spec.podTemplate.router.configuration.supergraph` | `introspection: true` | -| `router.configuration.include_subgraph_errors` | `spec.podTemplate.router.configuration.include_subgraph_errors` | `all: true` | -| `router.configuration.plugins` | `spec.podTemplate.router.configuration.plugins` | `experimental.expose_query_plan: true` | - -### Authentication & Authorization - -- **JWKS Authentication**: Points to `http://graphql.users.svc.cluster.local:4001/.well-known/jwks.json` -- **Authorization Preview Directives**: Enabled for all subgraphs - -### Coprocessor Configuration - -- **URL**: `http://coprocessor.coprocessor.svc.cluster.local:8081` -- **Timeout**: 2s -- **Router Request Headers**: Enabled -- **Subgraph Request/Response Headers**: Enabled +# Router Configuration TODO List + +This document tracks the migration of router configuration from Helm values to operator-managed Supergraph CRDs and the current implementation status. + +## ✅ Completed Tasks + +- [x] **Graph Creation**: Graph created in Apollo GraphOS +- [x] **Environment Variants**: Dev and prod variants created +- [x] **Subgraphs Deployment**: All subgraphs deployed with CRDs using inline SDL +- [x] **Operator Installation**: Apollo GraphOS Operator installed and configured +- [x] **Router Configuration ConfigMap**: Created `router-config` ConfigMap with custom router settings +- [x] **Rhai Scripts ConfigMap**: Created `rhai-scripts` ConfigMap with logging scripts +- [x] **Router Deployment Patching**: Implemented script to patch router deployment with ConfigMap volumes and args +- [x] **Coprocessor Deployment**: Coprocessor deployed and configured for JWT authentication +- [x] **Router Log Level**: Set to debug via `--log=debug` argument +- [x] **Ingress Configuration**: Ingress set up for external access via minikube tunnel +- [x] **Client Application**: Client deployed with nginx proxying GraphQL requests + +## 🔄 Current Implementation Status + +### Router Configuration Method + +The router configuration is currently implemented using a **hybrid approach**: + +1. 
**Supergraph CRD**: Managed by Apollo GraphOS Operator + - Schema composition and publishing + - Basic deployment configuration (replicas, resources, version) + - Schema source reference + +2. **ConfigMap + Manual Patching**: Custom router configuration + - Router configuration YAML (`router-config.yaml`) mounted via ConfigMap + - Rhai scripts mounted via ConfigMap + - Deployment patched manually after operator creates it + - Script: `scripts/minikube/08-apply-router-config.sh` + +### Configuration Files + +| Configuration | Location | Status | +|--------------|----------|--------| +| Router Config | `deploy/operator-resources/router-config.yaml` | ✅ Implemented via ConfigMap | +| Rhai Scripts | `deploy/operator-resources/rhai/main.rhai` | ✅ Implemented via ConfigMap | +| SupergraphSchema | Created by `07-deploy-operator-resources.sh` | ✅ Operator-managed | +| Supergraph | Created by `07-deploy-operator-resources.sh` | ✅ Operator-managed (patched) | + +## 📋 Configuration Details + +### Router Configuration (`router-config.yaml`) + +Current configuration includes: +- ✅ Supergraph listen port (4000) +- ✅ Introspection enabled +- ✅ Headers propagation +- ✅ JWT authentication (JWKS from users subgraph) +- ✅ Authorization directives enabled +- ✅ CORS (allow any origin) +- ✅ Coprocessor configuration +- ✅ Health check endpoint (8088) +- ✅ Sandbox enabled +- ✅ Rhai scripts configuration ### Rhai Scripts -Rhai scripts are handled via ConfigMap and volume mounts: -- **Scripts Location**: `/dist/rhai` (mounted from ConfigMap) -- **Main Script**: `main.rhai` -- **Helper Scripts**: `client_id.rhai` +- ✅ Logging at all router lifecycle stages: + - RouterService (HTTP request/response) + - SupergraphService (GraphQL request/response) + - ExecutionService (Query plan execution) + - SubgraphService (Subgraph communication) -The ConfigMap must be created separately: -```bash -kubectl create configmap rhai-config --from-file=deploy/router/rhai/ -n apollo -``` +### Coprocessor -### Prod-Only Configuration +- ✅ Deployed and running +- ✅ Adds "source" header to subgraph requests +- ✅ JWT validation handled by router's built-in authentication plugin -The following configurations are only present in `supergraph-prod.yaml`: +## ⚠️ Known Limitations -#### Persisted Queries +### Operator CRD Limitations -```yaml -persisted_queries: - enabled: true - log_unknown: true - safelist: - enabled: false - require_id: false -``` +The Apollo GraphOS Operator CRD does not natively support: +- ❌ Custom router configuration YAML in Supergraph CRD +- ❌ ConfigMap volumes for router configuration +- ❌ Custom container args (like `--config` and `--log`) +- ❌ Rhai scripts via ConfigMap volumes + +**Workaround**: We patch the deployment manually after the operator creates it. -#### Telemetry +### Current Workarounds -- **Apollo Field-Level Instrumentation**: Sampler 0.5 -- **OTLP Tracing**: gRPC endpoint `http://collector.monitoring:4317` -- **OTLP Metrics**: gRPC endpoint `http://collector.monitoring:4317` -- **Service Name**: "router" -- **Service Namespace**: "router" +1. **Router Configuration**: + - Created as ConfigMap (`router-config`) + - Mounted via volume at `/etc/router` + - Referenced via `--config /etc/router/router.yaml` argument + - Applied via `scripts/minikube/08-apply-router-config.sh` -## How to Update Router Configuration +2. 
**Rhai Scripts**: + - Created as ConfigMap (`rhai-scripts`) + - Mounted via volume at `/etc/rhai` + - Referenced in router config YAML + - Applied via `scripts/minikube/08-apply-router-config.sh` -To update router configuration without redeploying subgraphs: +3. **Log Level**: + - Set via `--log=debug` argument + - Applied via `scripts/minikube/08-apply-router-config.sh` -1. Edit the appropriate Supergraph CRD file: - - Dev: `deploy/operator-resources/supergraph-dev.yaml` - - Prod: `deploy/operator-resources/supergraph-prod.yaml` +## 🔧 Maintenance Tasks -2. Update the `spec.podTemplate.router.configuration` section +### When Updating Router Configuration -3. Apply the changes: +1. Edit `deploy/operator-resources/router-config.yaml` +2. Update the ConfigMap: ```bash - kubectl apply -f deploy/operator-resources/supergraph-{dev|prod}.yaml + kubectl create configmap router-config \ + --from-file=router.yaml=deploy/operator-resources/router-config.yaml \ + -n apollo --dry-run=client -o yaml | kubectl apply -f - + ``` +3. Restart router deployment: + ```bash + kubectl rollout restart deployment/reference-architecture-${ENVIRONMENT} -n apollo ``` -4. The operator will automatically trigger a router rollover with the new configuration - -## Resources - -Dev environment uses minimal resources: -- CPU: 100m -- Memory: 256Mi -- Replicas: 1 - -Prod environment uses production-grade resources: -- CPU: 500m -- Memory: 512Mi -- Replicas: 3 - -## Differences from Helm Chart - -The operator-managed approach differs from the Helm chart in several ways: +### When Updating Rhai Scripts -1. **No Helm templates**: Configuration is defined in Kubernetes-native CRDs -2. **Automatic rollover**: The operator handles rolling out changes to the router -3. **Declarative**: All configuration is version-controlled in YAML files -4. **Condition-based**: Can monitor router status via `kubectl get supergraph` +1. Edit `deploy/operator-resources/rhai/main.rhai` +2. Update the ConfigMap: + ```bash + kubectl create configmap rhai-scripts \ + --from-file=main.rhai=deploy/operator-resources/rhai/main.rhai \ + -n apollo --dry-run=client -o yaml | kubectl apply -f - + ``` +3. 
Restart router deployment: + ```bash + kubectl rollout restart deployment/reference-architecture-${ENVIRONMENT} -n apollo + ``` -## Troubleshooting +## 🚀 Future Improvements -### Router not picking up changes +### Potential Enhancements -Check the Supergraph status: -```bash -kubectl describe supergraph reference-architecture-{dev|prod} -n apollo -``` +- [ ] **Automate ConfigMap Updates**: Create a script to update ConfigMaps and restart deployments +- [ ] **Configuration Validation**: Add validation for router-config.yaml before applying +- [ ] **Environment-Specific Configs**: Support different router configs per environment +- [ ] **Telemetry Configuration**: Add OTLP tracing/metrics configuration (if needed) +- [ ] **Persisted Queries**: Configure persisted queries for production (if needed) +- [ ] **Operator Support**: Monitor Apollo GraphOS Operator updates for native support of: + - Custom router configuration + - ConfigMap volumes + - Container args + - Rhai scripts -Look for: -- `SchemaLoaded`: Should be `True` -- `Progressing`: Shows deployment status -- `Ready`: Should be `True` when fully deployed +### Documentation Updates Needed -### Rhai scripts not working +- [ ] Update `docs/setup.md` with router configuration update procedures +- [ ] Add troubleshooting guide for router configuration issues +- [ ] Document the patching approach and why it's necessary -Verify the ConfigMap exists and is mounted: -```bash -kubectl get configmap rhai-config -n apollo -kubectl describe pod -n apollo | grep rhai-volume -``` +## 🐛 Troubleshooting -### Coprocessor connection issues +### Router Not Picking Up Configuration Changes -Ensure coprocessor is running and accessible: -```bash -kubectl get pods -n coprocessor -kubectl get svc -n coprocessor -``` +1. Verify ConfigMap exists: + ```bash + kubectl get configmap router-config -n apollo + kubectl get configmap rhai-scripts -n apollo + ``` -## Current Configuration Status +2. Check volume mounts: + ```bash + kubectl describe deployment reference-architecture-${ENVIRONMENT} -n apollo | grep -A 10 "Volumes:" + kubectl describe pod -n apollo | grep -A 10 "Mounts:" + ``` -The Supergraph CRDs in this repository use a **simplified configuration** that does not include all the advanced router settings from the original Helm chart. This is because the current Apollo GraphOS Operator CRD does not support all configuration fields. +3. Verify container args: + ```bash + kubectl get deployment reference-architecture-${ENVIRONMENT} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args}' + ``` -### Supported Configuration -- ✅ Replicas count -- ✅ Router version -- ✅ Resource limits/requests -- ✅ Schema source (SupergraphSchema resource reference) +4. Check router logs: + ```bash + kubectl logs -n apollo deployment/reference-architecture-${ENVIRONMENT} -f + ``` -### Not Currently Supported in Supergraph CRD -- ❌ Custom router configuration (JWKS auth, coprocessor, CORS, etc.) -- ❌ Rhai scripts via ConfigMap volumes -- ❌ Custom ingress configuration -- ❌ Service type customization -- ❌ Telemetry exporters -- ❌ Advanced authentication/authorization +### Rhai Script Errors -### Operator API Key Setup +1. Check Rhai script syntax (Rhai doesn't support `in` operator) +2. Verify ConfigMap is mounted at `/etc/rhai` +3. Check router logs for Rhai execution errors +4. Ensure router config references Rhai scripts correctly -The operator requires an **Operator API key** (not a personal API key). To create one: +### Coprocessor Issues -1. 
Go to GraphOS Studio -2. Navigate to your graph → Settings → API Keys -3. Create a new API key with "Operator" role -4. Update the `apollo-api-key` secret with the Operator API key: +1. Verify coprocessor is running: ```bash - kubectl create secret generic apollo-api-key \ - --from-literal="APOLLO_KEY=" \ - -n apollo-operator \ - --dry-run=client -o yaml | kubectl apply -f - + kubectl get pods -n apollo -l app.kubernetes.io/name=coprocessor ``` -### TODO: Advanced Router Configuration - -The advanced router configuration (JWKS, coprocessor, Rhai scripts, telemetry, persisted queries) from the original `deploy/router/values.yaml` has not been migrated yet. This would need to be implemented either: - -1. Via router configuration YAML file in a ConfigMap (if supported) -2. Through GraphOS Studio router configuration -3. By extending the operator to support these fields -4. By using a custom router deployment instead of the operator-managed one +2. Check coprocessor service: + ```bash + kubectl get svc coprocessor -n apollo + ``` -### Current Status +3. Verify router config has correct coprocessor URL: + ```bash + kubectl get configmap router-config -n apollo -o yaml | grep coprocessor + ``` -- Graph is created ✅ -- Dev and prod variants created ✅ -- Subgraphs deployed and CRDs created ✅ -- Operator API key needs to be set up ⚠️ -- Advanced router configuration not yet migrated ⏳ +## 📝 Notes +- The manual patching approach is necessary because the Apollo GraphOS Operator CRD doesn't support all router configuration options +- Router configuration changes require restarting the deployment (not just updating ConfigMap) +- The `08-apply-router-config.sh` script handles all patching logic automatically +- Debug logging is enabled by default via `--log=debug` argument From 0db5e22fee5b2e91e0ce9a8cc318f5eb3b831832 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Tue, 11 Nov 2025 10:36:15 -0800 Subject: [PATCH 21/31] Update router configuration to utilize environment variables for log level settings. Replace manual patching of deployment arguments with declarative configuration in Supergraph CRD. Introduce a new Supergraph YAML template for streamlined deployment across environments. Enhance deployment scripts to check for environment-specific configurations, improving overall deployment process. 
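
To see that the declarative log level actually reaches the running router, it is enough to compare the CRD's podTemplate.env with the container env the operator renders. This is a read-only sketch under the assumption that the operator copies podTemplate.env into the Deployment verbatim, which is what this change relies on; expect debug for dev and warn for prod per the updated CRDs.

```bash
# Compare the declared and rendered APOLLO_ROUTER_LOG values
# (assumes ENVIRONMENT is exported, e.g. "dev").
RESOURCE="reference-architecture-${ENVIRONMENT}"
kubectl get supergraph "${RESOURCE}" -n apollo \
  -o jsonpath='{.spec.podTemplate.env}{"\n"}'
kubectl get deployment "${RESOURCE}" -n apollo \
  -o jsonpath='{.spec.template.spec.containers[0].env[?(@.name=="APOLLO_ROUTER_LOG")].value}{"\n"}'
```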
--- .../ROUTER_CONFIG_MIGRATION.md | 19 +++++---- deploy/operator-resources/supergraph-dev.yaml | 3 ++ .../operator-resources/supergraph-prod.yaml | 3 ++ .../supergraph.yaml.template | 21 ++++++++++ .../minikube/07-deploy-operator-resources.sh | 29 +++++-------- scripts/minikube/08-apply-router-config.sh | 42 +------------------ 6 files changed, 50 insertions(+), 67 deletions(-) create mode 100644 deploy/operator-resources/supergraph.yaml.template diff --git a/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md b/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md index a3b4477..5f69530 100644 --- a/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md +++ b/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md @@ -12,7 +12,7 @@ This document tracks the migration of router configuration from Helm values to o - [x] **Rhai Scripts ConfigMap**: Created `rhai-scripts` ConfigMap with logging scripts - [x] **Router Deployment Patching**: Implemented script to patch router deployment with ConfigMap volumes and args - [x] **Coprocessor Deployment**: Coprocessor deployed and configured for JWT authentication -- [x] **Router Log Level**: Set to debug via `--log=debug` argument +- [x] **Router Log Level**: Set to debug via `APOLLO_ROUTER_LOG` environment variable in Supergraph CRD podTemplate - [x] **Ingress Configuration**: Ingress set up for external access via minikube tunnel - [x] **Client Application**: Client deployed with nginx proxying GraphQL requests @@ -25,6 +25,7 @@ The router configuration is currently implemented using a **hybrid approach**: 1. **Supergraph CRD**: Managed by Apollo GraphOS Operator - Schema composition and publishing - Basic deployment configuration (replicas, resources, version) + - Environment variables (e.g., `APOLLO_ROUTER_LOG=debug`) - Schema source reference 2. **ConfigMap + Manual Patching**: Custom router configuration @@ -44,6 +45,12 @@ The router configuration is currently implemented using a **hybrid approach**: ## 📋 Configuration Details +### Log Level Configuration + +- ✅ Set via `APOLLO_ROUTER_LOG=debug` environment variable in Supergraph CRD `podTemplate.env` +- ✅ Configured declaratively in the CRD (no patching needed) +- ✅ Applied automatically by the operator when creating/updating the deployment + ### Router Configuration (`router-config.yaml`) Current configuration includes: @@ -79,10 +86,12 @@ Current configuration includes: The Apollo GraphOS Operator CRD does not natively support: - ❌ Custom router configuration YAML in Supergraph CRD - ❌ ConfigMap volumes for router configuration -- ❌ Custom container args (like `--config` and `--log`) +- ❌ Custom container args (like `--config`) - ❌ Rhai scripts via ConfigMap volumes -**Workaround**: We patch the deployment manually after the operator creates it. +**Note**: Environment variables (like `APOLLO_ROUTER_LOG`) are supported via `podTemplate.env` ✅ + +**Workaround**: We patch the deployment manually after the operator creates it for unsupported features. ### Current Workarounds @@ -98,10 +107,6 @@ The Apollo GraphOS Operator CRD does not natively support: - Referenced in router config YAML - Applied via `scripts/minikube/08-apply-router-config.sh` -3. 
**Log Level**: - - Set via `--log=debug` argument - - Applied via `scripts/minikube/08-apply-router-config.sh` - ## 🔧 Maintenance Tasks ### When Updating Router Configuration diff --git a/deploy/operator-resources/supergraph-dev.yaml b/deploy/operator-resources/supergraph-dev.yaml index 2cf03d9..2d310a0 100644 --- a/deploy/operator-resources/supergraph-dev.yaml +++ b/deploy/operator-resources/supergraph-dev.yaml @@ -7,6 +7,9 @@ spec: replicas: 3 podTemplate: routerVersion: 2.7.0 + env: + - name: APOLLO_ROUTER_LOG + value: "debug" resources: requests: cpu: 100m diff --git a/deploy/operator-resources/supergraph-prod.yaml b/deploy/operator-resources/supergraph-prod.yaml index 9b3ec55..641b9aa 100644 --- a/deploy/operator-resources/supergraph-prod.yaml +++ b/deploy/operator-resources/supergraph-prod.yaml @@ -7,6 +7,9 @@ spec: replicas: 3 podTemplate: routerVersion: 2.3.0 + env: + - name: APOLLO_ROUTER_LOG + value: "warn" resources: requests: cpu: 500m diff --git a/deploy/operator-resources/supergraph.yaml.template b/deploy/operator-resources/supergraph.yaml.template new file mode 100644 index 0000000..9fc9933 --- /dev/null +++ b/deploy/operator-resources/supergraph.yaml.template @@ -0,0 +1,21 @@ +apiVersion: apollographql.com/v1alpha2 +kind: Supergraph +metadata: + name: ${RESOURCE_NAME} + namespace: apollo +spec: + replicas: 3 + podTemplate: + routerVersion: 2.7.0 + env: + - name: APOLLO_ROUTER_LOG + value: "debug" + resources: + requests: + cpu: 100m + memory: 256Mi + schema: + resource: + name: ${RESOURCE_NAME} + namespace: apollo + diff --git a/scripts/minikube/07-deploy-operator-resources.sh b/scripts/minikube/07-deploy-operator-resources.sh index 0b7f8e4..cc8a2e9 100755 --- a/scripts/minikube/07-deploy-operator-resources.sh +++ b/scripts/minikube/07-deploy-operator-resources.sh @@ -90,25 +90,16 @@ sleep 5 # The router configuration is loaded from the ConfigMap and mounted as a volume # The router will use the --config flag to reference the mounted file echo "Deploying Supergraph..." -cat < Date: Tue, 11 Nov 2025 11:03:27 -0800 Subject: [PATCH 22/31] Remove deprecated scripts and introduce new configuration files for Apollo GraphOS Operator deployment. Replace the existing apply-resources.sh script with a more structured approach using YAML files for operator values and subgraph templates. Update deployment scripts to utilize these new configurations, enhancing clarity and maintainability of the deployment process. 
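
The per-subgraph rendering now boils down to two sed passes over subgraph.yaml.template. The sketch below replays that flow for a single subgraph; the example name, the schema path, and the indentation step are assumptions (the exact preparation of the schema temp file is elided in the hunk below), so treat it as a readable restatement of 05-deploy-subgraphs.sh rather than a copy of it.

```bash
# Render and apply one Subgraph CRD from the template (illustrative sketch).
subgraph="products"                      # example; the script loops over every subgraph
TEMP_SCHEMA=$(mktemp)
TEMP_TEMPLATE=$(mktemp)
# Assumption: the SDL is indented so it nests under the CRD's YAML block scalar.
sed 's/^/    /' "subgraphs/${subgraph}/schema.graphql" > "$TEMP_SCHEMA"
# 1) substitute the subgraph name into the template
sed "s/\${SUBGRAPH_NAME}/${subgraph}/g" deploy/operator-resources/subgraph.yaml.template > "$TEMP_TEMPLATE"
# 2) splice the SDL in at the ${SCHEMA_CONTENT} placeholder, drop the placeholder line, and apply
sed "/\${SCHEMA_CONTENT}/r $TEMP_SCHEMA" "$TEMP_TEMPLATE" | sed '/\${SCHEMA_CONTENT}/d' | kubectl apply -f -
rm -f "$TEMP_TEMPLATE" "$TEMP_SCHEMA"
```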
--- deploy/operator-resources/apply-resources.sh | 93 ------- .../operator-resources/operator-values.yaml | 7 + .../operator-resources/subgraph.yaml.template | 14 + .../supergraphschema-dev.yaml | 2 +- .../supergraphschema-prod.yaml | 2 +- scripts/minikube/03-setup-cluster.sh | 9 +- scripts/minikube/05-deploy-subgraphs.sh | 25 +- .../minikube/07-deploy-operator-resources.sh | 25 +- terraform/minikube/create_graph.sh | 246 ------------------ terraform/minikube/setup_clusters.sh | 123 --------- 10 files changed, 44 insertions(+), 502 deletions(-) delete mode 100755 deploy/operator-resources/apply-resources.sh create mode 100644 deploy/operator-resources/operator-values.yaml create mode 100644 deploy/operator-resources/subgraph.yaml.template delete mode 100755 terraform/minikube/create_graph.sh delete mode 100755 terraform/minikube/setup_clusters.sh diff --git a/deploy/operator-resources/apply-resources.sh b/deploy/operator-resources/apply-resources.sh deleted file mode 100755 index 56d2f89..0000000 --- a/deploy/operator-resources/apply-resources.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/bash -set -euo pipefail - -# This script applies the operator resources with the correct graph ID -# Usage: ./apply-resources.sh [environment] -# Environment defaults to "dev" if not specified - -ENVIRONMENT=${1:-dev} - -# Check if APOLLO_GRAPH_ID is set (load from .env if available) -if [ -f .env ]; then - source .env -fi - -if [[ -z "${APOLLO_GRAPH_ID:-}" ]]; then - echo "Error: APOLLO_GRAPH_ID is not set. Please source .env file or set it as an environment variable." - exit 1 -fi - -echo "Deploying operator resources for ${ENVIRONMENT} environment with graph ID: ${APOLLO_GRAPH_ID}" - -RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" - -# Apply SupergraphSchema -cat < "$TEMP_SCHEMA" + sed "s/\${SUBGRAPH_NAME}/${subgraph}/g" deploy/operator-resources/subgraph.yaml.template > "$TEMP_TEMPLATE" + # Replace the SCHEMA_CONTENT placeholder line with the actual schema content using sed + sed "/\${SCHEMA_CONTENT}/r $TEMP_SCHEMA" "$TEMP_TEMPLATE" | sed '/\${SCHEMA_CONTENT}/d' | kubectl apply -f - + rm -f "$TEMP_TEMPLATE" "$TEMP_SCHEMA" echo "✓ ${subgraph} deployed successfully" done diff --git a/scripts/minikube/07-deploy-operator-resources.sh b/scripts/minikube/07-deploy-operator-resources.sh index cc8a2e9..4175146 100755 --- a/scripts/minikube/07-deploy-operator-resources.sh +++ b/scripts/minikube/07-deploy-operator-resources.sh @@ -65,20 +65,17 @@ echo "Rhai scripts ConfigMap created" # Deploy SupergraphSchema echo "Deploying SupergraphSchema..." -cat <&2 - exit 1 -fi - -if [[ $(which jq) == "" ]]; then - echo "please install jq before continuing: https://stedolan.github.io/jq/" - exit 1 -fi - -if [[ $(which rover) == "" ]]; then - echo "rover not installed; see: https://www.apollographql.com/docs/rover/getting-started/" - exit 1 -fi - -# if an account id is not provided, fetch it from Studio -if [[ $ACCOUNT_ID == "" ]]; then - ACCOUNT_ARGS=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --url 'https://graphql.api.apollographql.com/api/graphql' - --data '{"query":"{ me { ... 
on User { memberships { permission account { id } } } } }"}' - ) - - if [[ $HEADER != "" ]]; then - ACCOUNT_ARGS+=(--header "$HEADER") - fi - - ACCOUNT_RESP=$(curl "${ACCOUNT_ARGS[@]}") - ACCOUNT_COUNT=$(echo $ACCOUNT_RESP | jq -r ".data.me.memberships | length") - - # if more than one account exists, exit early - if [[ $ACCOUNT_COUNT > 1 ]]; then - echo "Apollo Studio returned more than one account." - echo "Specify an account ID with ACCOUNT_ID=myaccount $0" - echo "Accounts: " - echo $(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[].account.id") - exit 1 - fi - - ACCOUNT_ID=$(echo $ACCOUNT_RESP | jq -r ".data.me.memberships[0].account.id") -fi - -echo "Creating graph $GRAPH_ID on account $ACCOUNT_ID..." - -CREATE_ARGS=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --url 'https://graphql.api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreateGraph(\$accountId: ID!, \$newServiceId: ID!, \$name: String, \$onboardingArchitecture: OnboardingArchitecture) { newService(accountId: \$accountId, id: \$newServiceId, name: \$name, onboardingArchitecture: \$onboardingArchitecture) { id apiKeys { token } } }\",\"variables\":{\"accountId\":\"$ACCOUNT_ID\",\"newServiceId\":\"$GRAPH_ID\",\"name\":\"Build a Supergraph $(date +"%Y-%m-%d")\",\"onboardingArchitecture\":\"SUPERGRAPH\"}}" -) - -if [[ $HEADER != "" ]]; then - CREATE_ARGS+=(--header "$HEADER") -fi - -CREATE_RESP=$(curl "${CREATE_ARGS[@]}") - -IS_SUCCESS=$(echo $CREATE_RESP | jq -r ".data.newService") -if [[ "$IS_SUCCESS" == "null" ]]; then - echo "Error creating graph" - echo $CREATE_RESP | jq . - exit 1 -fi - -GRAPH_KEY=$(echo $CREATE_RESP | jq -r ".data.newService.apiKeys[0].token") - -# Create Operator API key for the operator to use -echo "Creating Operator API key..." - -CREATE_OPERATOR_KEY_ARGS=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header "apollographql-client-name: reference-architecture" - --header "apollographql-client-version: 1.0" - --header 'content-type: application/json' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreateOperatorKey(\$name: String!, \$type: GraphOsKeyType!, \$organizationId: ID!) { organization(id: \$organizationId) { createKey(name: \$name, type: \$type) { id keyName expiresAt token } } }\",\"variables\":{\"name\":\"operator\",\"type\":\"OPERATOR\",\"organizationId\":\"$ACCOUNT_ID\"}}" -) - -CREATE_OPERATOR_KEY_RESP=$(curl "${CREATE_OPERATOR_KEY_ARGS[@]}") - -OPERATOR_KEY=$(echo $CREATE_OPERATOR_KEY_RESP | jq -r ".data.organization.createKey.token") -if [[ "$OPERATOR_KEY" == "null" ]]; then - echo "Error creating operator key" - echo $CREATE_OPERATOR_KEY_RESP | jq . - exit 1 -fi - -echo "Operator key created successfully" - -# Note: Subgraph schema publishing is now handled by the Apollo GraphOS Operator -# when Subgraph CRDs are deployed. No manual rover publish commands needed. -# We create variants by publishing dummy subgraphs to them. - -echo "Creating dev and prod variants by publishing dummy subgraphs..." 
- -for variant in "${VARIANTS[@]}"; do - echo "Creating variant: $variant" - - PUBLISH_ARGS=( - --silent - --header "x-api-key: $GRAPH_KEY" - --header "apollographql-client-name: reference-architecture" - --header "apollographql-client-version: 1.0" - --header 'content-type: application/json' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation PublishSubgraph(\$graphId: ID!, \$graphVariant: String!, \$name: String!, \$revision: String!, \$activePartialSchema: PartialSchemaInput!, \$url: String) { graph(id: \$graphId) { publishSubgraph(graphVariant: \$graphVariant, name: \$name, revision: \$revision, activePartialSchema: \$activePartialSchema, url: \$url) { subgraphsCreated errors { message locations { column line } code } wasCreated wasUpdated } } }\",\"variables\":{\"graphId\":\"$GRAPH_ID\",\"graphVariant\":\"$variant\",\"name\":\"temp-subgraph\",\"revision\":\"1\",\"activePartialSchema\":{\"sdl\":\"type Query { temp: String }\"},\"url\":\"http://localhost:1234\"}}" - ) - - PUBLISH_RESP=$(curl "${PUBLISH_ARGS[@]}") - - if [[ $(echo $PUBLISH_RESP | jq -r ".data.graph.publishSubgraph.errors | length") > 0 ]]; then - echo "Error creating variant $variant" - echo $PUBLISH_RESP | jq . - exit 1 - fi - - echo "Created variant: $variant" -done - -# Create persisted query lists for dev and prod -# dev -CREATE_PQ_ARGS_DEV=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" -) - -if [[ $HEADER != "" ]]; then - CREATE_PQ_ARGS_DEV+=(--header "$HEADER") -fi - -CREATE_PQ_DEV_RESP=$(curl "${CREATE_PQ_ARGS_DEV[@]}") - -IS_SUCCESS=$(echo $CREATE_PQ_DEV_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList") -if [[ "$IS_SUCCESS" == "null" ]]; then - echo "Error creating pq list for dev" - echo $CREATE_PQ_DEV_RESP | jq . - exit 1 -fi - -DEV_PQ_ID=$(echo $CREATE_PQ_DEV_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList.id") - -UPDATE_DEV_PQ_LIST_ARGS=( - --silent - --request POST - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... 
on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$DEV_PQ_ID\",\"name\":\"dev\",\"graphId\":\"$GRAPH_ID\"}}" -) - -if [[ $HEADER != "" ]]; then - UPDATE_DEV_PQ_LIST_ARGS+=(--header "$HEADER") -fi - -UPDATE_DEV_PQ_LIST_RESP=$(curl "${UPDATE_DEV_PQ_LIST_ARGS[@]}") - -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_DEV_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for dev" - echo $UPDATE_DEV_PQ_LIST_RESP | jq . - exit 1 -fi - -# prod -CREATE_PQ_ARGS_PROD=( - --silent - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation CreatePersistedQueryList(\$name: String!, \$graphId: ID!) { graph(id: \$graphId) { createPersistedQueryList(name: \$name) { ... on CreatePersistedQueryListResult { persistedQueryList { id } } } } }\",\"variables\":{\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" -) - -if [[ $HEADER != "" ]]; then - CREATE_PQ_ARGS_DEV+=(--header "$HEADER") -fi - -CREATE_PQ_PROD_RESP=$(curl "${CREATE_PQ_ARGS_PROD[@]}") -IS_SUCCESS=$(echo $CREATE_PQ_PROD_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList") -if [[ "$IS_SUCCESS" == "null" ]]; then - echo "Error creating pq list for prod" - echo $CREATE_PQ_PROD_RESP | jq . - exit 1 -fi - -PROD_PQ_ID=$(echo $CREATE_PQ_PROD_RESP | jq -r ".data.graph.createPersistedQueryList.persistedQueryList.id") - -UPDATE_PROD_PQ_LIST_ARGS=( - --silent - --request POST - --header "x-api-key: $APOLLO_KEY" - --header 'content-type: application/json' - --header 'apollographql-client-name: reference-architecture' - --header 'apollographql-client-version: 1.0' - --url 'https://api.apollographql.com/api/graphql' - --data "{\"query\":\"mutation LinkPersistedQueryList(\$persistedQueryListId: ID!, \$name: String!, \$graphId: ID!) { graph(id: \$graphId) { variant(name: \$name) { linkPersistedQueryList(persistedQueryListId: \$persistedQueryListId) { __typename ... on ListNotFoundError { listId message } ... on PermissionError { message } ... on VariantAlreadyLinkedError { message } } } } }\",\"variables\":{\"persistedQueryListId\":\"$PROD_PQ_ID\",\"name\":\"prod\",\"graphId\":\"$GRAPH_ID\"}}" -) -if [[ $HEADER != "" ]]; then - UPDATE_PROD_PQ_LIST_ARGS+=(--header "$HEADER") -fi - -UPDATE_PROD_PQ_LIST_RESP=$(curl "${UPDATE_PROD_PQ_LIST_ARGS[@]}") - -# Check for errors in the response -ERROR_TYPE=$(echo $UPDATE_PROD_PQ_LIST_RESP | jq -r ".data.graph.variant.linkPersistedQueryList.__typename") -if [[ "$ERROR_TYPE" == "ListNotFoundError" ]] || [[ "$ERROR_TYPE" == "PermissionError" ]] || [[ "$ERROR_TYPE" == "VariantAlreadyLinkedError" ]]; then - echo "Error linking pq list for prod" - echo $UPDATE_PROD_PQ_LIST_RESP | jq . - exit 1 -fi - -echo '' -echo "Adding Apollo credentials as Terraform variables in .env..." -echo '' >> .env -echo "export TF_VAR_apollo_key=\"$GRAPH_KEY\"" >> .env -echo "export TF_VAR_apollo_graph_id=\"$GRAPH_ID\"" >> .env -echo "export TF_VAR_pq_dev_id=\"$DEV_PQ_ID\"" >> .env -echo "export TF_VAR_pq_prod_id=\"$PROD_PQ_ID\"" >> .env -echo "export OPERATOR_KEY=\"$OPERATOR_KEY\"" >> .env -echo '' >> .env -echo 'Re-run `source .env` to load them.' 
diff --git a/terraform/minikube/setup_clusters.sh b/terraform/minikube/setup_clusters.sh deleted file mode 100755 index 8f563b9..0000000 --- a/terraform/minikube/setup_clusters.sh +++ /dev/null @@ -1,123 +0,0 @@ -#/bin/bash -set -euxo pipefail - -# default vars -CLUSTER_PREFIX=${CLUSTER_PREFIX:-"apollo-supergraph-k8s"} -PROJECT_REGION=${PROJECT_REGION:-"us-east1"} -PROJECT_CLUSTERS=("${CLUSTER_PREFIX}-dev" "${CLUSTER_PREFIX}-prod") -# end default vars - -if [[ $(which gcloud) == "" ]]; then - echo "gcloud not installed" - exit 1 -fi - -if [[ $(which kubectl) == "" ]]; then - echo "kubectl not installed" - exit 1 -fi - -if [[ $(which kubectx) == "" ]]; then - echo "kubectx not installed" - exit 1 -fi - -if [[ -z "$PROJECT_ID" ]]; then - echo "Must provide PROJECT_ID in environment" 1>&2 - exit 1 -fi - -environment_setup(){ - echo "Configuring Kubeconfig for ${1}..." - gcloud container clusters get-credentials ${1} --zone ${PROJECT_REGION} --project ${PROJECT_ID} - - # short context aliases: supports `kubectx apollo-supergraph-k8s-dev` - kubectx ${1}=. - - # monitoring setup: namespace, service account, and binding - # the service account name matches the otel collector's service account in its helm chart - kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f - - kubectl create serviceaccount -n "monitoring" "metrics-writer" --dry-run=client -o yaml | kubectl apply -f - - kubectl annotate serviceaccount -n "monitoring" "metrics-writer" "iam.gke.io/gcp-service-account=${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" --overwrite - gcloud iam service-accounts add-iam-policy-binding \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${PROJECT_ID}.svc.id.goog[monitoring/metrics-writer]" \ - "${CLUSTER_PREFIX:0:12}-metrics-writer@$PROJECT_ID.iam.gserviceaccount.com" - - # Apollo GraphOS Operator setup - echo "Installing Apollo GraphOS Operator..." - kubectl create namespace apollo-operator --dry-run=client -o yaml | kubectl apply -f - - kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - - - # Create operator API key secret (requires OPERATOR_KEY to be set) - if [[ -n "$OPERATOR_KEY" ]]; then - kubectl create secret generic apollo-api-key \ - --from-literal="APOLLO_KEY=$OPERATOR_KEY" \ - -n apollo-operator \ - --dry-run=client -o yaml | kubectl apply -f - - echo "Operator API key secret created" - else - echo "Warning: OPERATOR_KEY not set. Operator secret not created." - fi - - # Create GitHub Container Registry image pull secret (optional, requires TF_VAR_github_token) - if [[ -n "$TF_VAR_github_token" && -n "$GITHUB_ORG" ]]; then - echo "Creating GitHub Container Registry image pull secret..." 
- # Create in default namespace - kubectl create secret docker-registry ghcr-secret \ - --docker-server=ghcr.io \ - --docker-username="$GITHUB_ORG" \ - --docker-password="$TF_VAR_github_token" \ - --namespace=default \ - --dry-run=client -o yaml | kubectl apply -f - - - # Create in apollo namespace - kubectl create secret docker-registry ghcr-secret \ - --docker-server=ghcr.io \ - --docker-username="$GITHUB_ORG" \ - --docker-password="$TF_VAR_github_token" \ - --namespace=apollo \ - --dry-run=client -o yaml | kubectl apply -f - - - # Create in apollo-operator namespace and patch service account - kubectl create secret docker-registry ghcr-secret \ - --docker-server=ghcr.io \ - --docker-username="$GITHUB_ORG" \ - --docker-password="$TF_VAR_github_token" \ - --namespace=apollo-operator \ - --dry-run=client -o yaml | kubectl apply -f - - - kubectl patch serviceaccount apollo-operator -n apollo-operator \ - -p '{"imagePullSecrets":[{"name":"ghcr-secret"}]}' || true - - echo "GitHub Container Registry image pull secret created" - else - echo "Warning: TF_VAR_github_token and/or GITHUB_ORG not set. Image pull secret not created." - echo " Subgraphs may fail to pull images if they are private. Set these variables to enable image pull authentication." - fi - - # Install operator using Helm - if [[ $(which helm) != "" ]]; then - helm upgrade --install --atomic apollo-operator \ - oci://registry-1.docker.io/apollograph/operator-chart \ - -n apollo-operator \ - --create-namespace \ - -f - < Date: Tue, 11 Nov 2025 11:08:53 -0800 Subject: [PATCH 23/31] Remove deprecated Ingress configuration files for development and production environments. Update documentation to reflect changes in router configuration management and patching approach. Enhance clarity on updating router configurations and troubleshooting steps. 
--- .../ROUTER_CONFIG_MIGRATION.md | 5 - deploy/operator-resources/ingress-dev.yaml | 18 --- deploy/operator-resources/ingress-prod.yaml | 18 --- docs/operator-guide.md | 16 ++- docs/setup.md | 131 +++++++++++++++++- 5 files changed, 141 insertions(+), 47 deletions(-) delete mode 100644 deploy/operator-resources/ingress-dev.yaml delete mode 100644 deploy/operator-resources/ingress-prod.yaml diff --git a/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md b/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md index 5f69530..1b263c3 100644 --- a/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md +++ b/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md @@ -152,11 +152,6 @@ The Apollo GraphOS Operator CRD does not natively support: - Container args - Rhai scripts -### Documentation Updates Needed - -- [ ] Update `docs/setup.md` with router configuration update procedures -- [ ] Add troubleshooting guide for router configuration issues -- [ ] Document the patching approach and why it's necessary ## 🐛 Troubleshooting diff --git a/deploy/operator-resources/ingress-dev.yaml b/deploy/operator-resources/ingress-dev.yaml deleted file mode 100644 index 1b7760d..0000000 --- a/deploy/operator-resources/ingress-dev.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: router - namespace: apollo -spec: - ingressClassName: nginx - rules: - - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: reference-architecture-dev - port: - number: 80 - diff --git a/deploy/operator-resources/ingress-prod.yaml b/deploy/operator-resources/ingress-prod.yaml deleted file mode 100644 index 60b8bc9..0000000 --- a/deploy/operator-resources/ingress-prod.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: router - namespace: apollo -spec: - ingressClassName: nginx - rules: - - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: reference-architecture-prod - port: - number: 80 - diff --git a/docs/operator-guide.md b/docs/operator-guide.md index e239ca5..73b1557 100644 --- a/docs/operator-guide.md +++ b/docs/operator-guide.md @@ -234,13 +234,21 @@ kubectl logs -n apollo deployment/reference-architecture-{dev|prod} ## Updating Router Configuration +**Note:** The router configuration is managed separately from the Supergraph CRD. See [Updating Router Configuration](../docs/setup.md#step-5-updating-router-configuration) in the setup guide for details. + +The Apollo GraphOS Operator's `Supergraph` CRD does not support custom router configuration YAML directly. Instead, we use a hybrid approach: + +1. The operator creates the router deployment with basic settings +2. Script 08 patches the deployment to mount custom configuration via ConfigMap +3. Configuration updates require updating the ConfigMap and restarting the deployment + To update router configuration without changing subgraphs: -```bash -kubectl edit supergraph reference-architecture-dev -n apollo -``` +1. Edit `deploy/operator-resources/router-config.yaml` +2. Update the ConfigMap: `kubectl create configmap router-config --from-file=router.yaml=deploy/operator-resources/router-config.yaml -n apollo --dry-run=client -o yaml | kubectl apply -f -` +3. Restart the deployment: `kubectl rollout restart deployment/reference-architecture-${ENVIRONMENT} -n apollo` -Changes are applied via rolling update - the operator manages the rollout. 
+For more details, see the [Router Configuration Migration Guide](../deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md). ## Best Practices diff --git a/docs/setup.md b/docs/setup.md index 4261e33..9170b8e 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -270,9 +270,80 @@ Or test the health endpoint (if accessible on the main port): curl http://localhost:4000/health ``` -## Step 5: Logging Into the Client Application +## Step 5: Updating Router Configuration -If you deployed the client application (script 08), you can log in using the following test credentials: +The router configuration is stored in `deploy/operator-resources/router-config.yaml`. To update the router configuration: + +### Why We Patch the Deployment + +The Apollo GraphOS Operator's `Supergraph` CRD does not natively support custom router configuration YAML, ConfigMap volumes, or custom container arguments. Therefore, we use a **hybrid approach**: + +1. The operator creates the router deployment with basic settings +2. Script 08 (`08-apply-router-config.sh`) patches the deployment to: + - Mount the `router-config` ConfigMap as a volume + - Add `--config /etc/router/router.yaml` argument + - Mount the `rhai-scripts` ConfigMap for custom scripts + - Set log level via environment variable (`APOLLO_ROUTER_LOG`) + +This patching approach is necessary because the operator doesn't support these advanced configuration options directly in the CRD. + +### Updating Router Configuration + +To update the router configuration: + +1. **Edit the configuration file:** + ```bash + # Edit the router configuration + vim deploy/operator-resources/router-config.yaml + ``` + +2. **Update the ConfigMap:** + ```bash + kubectl create configmap router-config \ + --from-file=router.yaml=deploy/operator-resources/router-config.yaml \ + -n apollo --dry-run=client -o yaml | kubectl apply -f - + ``` + +3. **Restart the router deployment:** + ```bash + kubectl rollout restart deployment/reference-architecture-${ENVIRONMENT} -n apollo + ``` + +4. **Wait for rollout to complete:** + ```bash + kubectl rollout status deployment/reference-architecture-${ENVIRONMENT} -n apollo + ``` + +### Updating Rhai Scripts + +To update the Rhai scripts: + +1. **Edit the script file:** + ```bash + vim deploy/operator-resources/rhai/main.rhai + ``` + +2. **Update the ConfigMap:** + ```bash + kubectl create configmap rhai-scripts \ + --from-file=main.rhai=deploy/operator-resources/rhai/main.rhai \ + -n apollo --dry-run=client -o yaml | kubectl apply -f - + ``` + +3. **Restart the router deployment:** + ```bash + kubectl rollout restart deployment/reference-architecture-${ENVIRONMENT} -n apollo + ``` + +**Note:** If you need to re-apply the router configuration patching (e.g., after operator updates the deployment), you can re-run script 08: + +```bash +./scripts/minikube/08-apply-router-config.sh +``` + +## Step 6: Logging Into the Client Application + +If you deployed the client application (script 10), you can log in using the following test credentials: ### Test Users @@ -367,6 +438,62 @@ kubectl describe supergraph reference-architecture-${ENVIRONMENT} -n apollo kubectl logs -n apollo deployment/reference-architecture-${ENVIRONMENT} ``` +### Router configuration not applied + +If the router is not picking up configuration changes: + +1. **Verify ConfigMaps exist:** + ```bash + kubectl get configmap router-config -n apollo + kubectl get configmap rhai-scripts -n apollo + ``` + +2. 
**Check volume mounts:** + ```bash + kubectl describe deployment reference-architecture-${ENVIRONMENT} -n apollo | grep -A 10 "Volumes:" + kubectl describe pod -n apollo | grep -A 10 "Mounts:" + ``` + +3. **Verify container arguments:** + ```bash + kubectl get deployment reference-architecture-${ENVIRONMENT} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args}' + ``` + + Should include `--config /etc/router/router.yaml` + +4. **Check router logs for configuration errors:** + ```bash + kubectl logs -n apollo deployment/reference-architecture-${ENVIRONMENT} | grep -i "config\|error" + ``` + +5. **Re-apply router configuration:** + ```bash + ./scripts/minikube/08-apply-router-config.sh + ``` + +### Router pods in CrashLoopBackOff + +If router pods are crashing: + +1. **Check pod logs:** + ```bash + kubectl logs -n apollo deployment/reference-architecture-${ENVIRONMENT} --previous + ``` + +2. **Common causes:** + - Invalid YAML in `router-config.yaml` (check syntax) + - Missing ConfigMap (verify ConfigMaps exist) + - Volume mount path incorrect (should be `/etc/router`) + - Configuration file not found (check `--config` argument) + +3. **Verify configuration syntax:** + ```bash + # Check if router-config.yaml is valid YAML + kubectl create configmap router-config \ + --from-file=router.yaml=deploy/operator-resources/router-config.yaml \ + -n apollo --dry-run=client -o yaml | kubectl apply -f - --dry-run=client + ``` + ### Ingress not working Ensure ingress addon is enabled: From 009093c21788adc50fd7cca1c4c06acce4cdda5b Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Tue, 11 Nov 2025 12:55:53 -0800 Subject: [PATCH 24/31] Add TODO and README files for deployment components; mark as not yet implemented - Introduced a TODO.md file to track future improvements and features for the reference architecture. - Added README files for OpenTelemetry Collector, Grafana, InfluxDB, k6, tests, and Zipkin, indicating that these components are not yet implemented and should not be used. - Each README outlines the current status and future implementation plans for the respective components, enhancing clarity for developers on the project's roadmap. --- TODO.md | 28 +++ deploy/collector/README.md | 20 +++ deploy/grafana/README.md | 20 +++ deploy/influxdb/README.md | 20 +++ deploy/k6/README.md | 58 ++----- deploy/tests/README.md | 20 +++ deploy/zipkin/README.md | 19 ++ scripts/minikube/08-apply-router-config.sh | 192 +++++---------------- 8 files changed, 178 insertions(+), 199 deletions(-) create mode 100644 TODO.md create mode 100644 deploy/collector/README.md create mode 100644 deploy/grafana/README.md create mode 100644 deploy/influxdb/README.md create mode 100644 deploy/tests/README.md create mode 100644 deploy/zipkin/README.md diff --git a/TODO.md b/TODO.md new file mode 100644 index 0000000..8c8fe71 --- /dev/null +++ b/TODO.md @@ -0,0 +1,28 @@ +# Project TODO + +This file tracks future improvements and features for the reference architecture. 
+ +## Deployment & Infrastructure + +- [ ] Deploy MCP (Model Context Protocol) server + - Set up MCP server deployment in Kubernetes + - Configure integration with Apollo GraphOS + - Add deployment scripts + - Update documentation + +## Monitoring & Observability + +- [ ] Implement Zipkin for distributed tracing +- [ ] Implement OpenTelemetry Collector +- [ ] Implement Grafana for metrics visualization +- [ ] Implement InfluxDB for time-series metrics storage +- [ ] Implement k6 for load testing +- [ ] Integrate test scripts with k6 + +## Future Enhancements + +- [ ] Add persisted queries support +- [ ] Add telemetry configuration to router +- [ ] Add advanced router configuration options +- [ ] Add production-ready security configurations + diff --git a/deploy/collector/README.md b/deploy/collector/README.md new file mode 100644 index 0000000..d7c4e9c --- /dev/null +++ b/deploy/collector/README.md @@ -0,0 +1,20 @@ +# ⚠️ Collector (OpenTelemetry) - Not Yet Implemented + +**This directory is not yet implemented and should not be used.** + +The OpenTelemetry Collector Helm chart is present in this directory but is not integrated into the deployment scripts. Do not attempt to deploy the collector until implementation is complete. + +## Status + +- [ ] Deployment script not created +- [ ] Integration with Zipkin not configured +- [ ] Integration with subgraphs not configured +- [ ] Documentation not written +- [ ] Testing not completed + +## Future Implementation + +When implemented, the OpenTelemetry Collector will be used for collecting and exporting telemetry data. The collector configuration references Zipkin as an exporter, but the deployment is not yet automated. + +**Do not use this directory until implementation is complete.** + diff --git a/deploy/grafana/README.md b/deploy/grafana/README.md new file mode 100644 index 0000000..1056eec --- /dev/null +++ b/deploy/grafana/README.md @@ -0,0 +1,20 @@ +# ⚠️ Grafana - Not Yet Implemented + +**This directory is not yet implemented and should not be used.** + +The Grafana values file is present in this directory but Grafana is not integrated into the deployment scripts. Do not attempt to deploy Grafana until implementation is complete. + +## Status + +- [ ] Deployment script not created +- [ ] Integration with InfluxDB not configured +- [ ] Dashboard provisioning not set up +- [ ] Documentation not written +- [ ] Testing not completed + +## Future Implementation + +When implemented, Grafana will be used for visualizing metrics and dashboards. The values file references InfluxDB as a datasource and includes k6 dashboard configuration, but the deployment is not yet automated. + +**Do not use this directory until implementation is complete.** + diff --git a/deploy/influxdb/README.md b/deploy/influxdb/README.md new file mode 100644 index 0000000..0a7ad2a --- /dev/null +++ b/deploy/influxdb/README.md @@ -0,0 +1,20 @@ +# ⚠️ InfluxDB - Not Yet Implemented + +**This directory is not yet implemented and should not be used.** + +The InfluxDB values file is present in this directory but InfluxDB is not integrated into the deployment scripts. Do not attempt to deploy InfluxDB until implementation is complete. + +## Status + +- [ ] Deployment script not created +- [ ] Integration with Grafana not configured +- [ ] Integration with k6 not configured +- [ ] Documentation not written +- [ ] Testing not completed + +## Future Implementation + +When implemented, InfluxDB will be used as a time-series database for storing metrics. 
Grafana is configured to use InfluxDB as a datasource, but the deployment is not yet automated. + +**Do not use this directory until implementation is complete.** + diff --git a/deploy/k6/README.md b/deploy/k6/README.md index cb46533..9c7e4b4 100644 --- a/deploy/k6/README.md +++ b/deploy/k6/README.md @@ -1,53 +1,19 @@ -# k6-operator +# ⚠️ k6 - Not Yet Implemented -![Version: 0.0.1](https://img.shields.io/badge/Version-0.0.1-informational?style=flat-square) ![AppVersion: 0.0.6](https://img.shields.io/badge/AppVersion-0.0.6-informational?style=flat-square) +**This directory is not yet implemented and should not be used.** -A Helm chart to install the k6 operator +The k6 operator Helm chart is present in this directory but k6 is not integrated into the deployment scripts. Do not attempt to deploy k6 until implementation is complete. -**Homepage:** +## Status -## Maintainers +- [ ] Deployment script not created +- [ ] Integration with InfluxDB not configured +- [ ] Test scripts not integrated +- [ ] Documentation not written +- [ ] Testing not completed -| Name | Email | Url | -| ---- | ------ | --- | -| yorugac | yorugac@gmail.com | | +## Future Implementation -## Source Code +When implemented, k6 will be used for load testing the supergraph. The k6 operator will run test scripts and export results to InfluxDB for visualization in Grafana, but the deployment is not yet automated. -* - -## Requirements - -Kubernetes: `>=1.16.0-0` - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| authProxy.enabled | bool | `true` | enables the protection of /metrics endpoint. (https://github.com/brancz/kube-rbac-proxy) | -| authProxy.image.name | string | `"gcr.io/kubebuilder/kube-rbac-proxy"` | rbac-proxy image name | -| authProxy.image.pullPolicy | string | `"IfNotPresent"` | pull policy for the image can be Always, Never, IfNotPresent (default: IfNotPresent) | -| authProxy.image.tag | string | `"v0.5.0"` | rbac-proxy image tag | -| authProxy.livenessProbe | object | `{}` | Liveness probe in Probe format | -| authProxy.readinessProbe | object | `{}` | Readiness probe in Probe format | -| authProxy.resources | object | `{}` | rbac-proxy resource limitation/request | -| controlPlane | string | `"controller-manager"` | | -| customAnnotations | object | `{}` | Custom Annotations to be applied on all resources | -| customLabels | object | `{}` | Custom Label to be applied on all resources | -| manager.env | object | `{}` | Environment variable to be passet to the controller | -| manager.image.name | string | `"ghcr.io/grafana/operator"` | controller-manager image name | -| manager.image.pullPolicy | string | `"Always"` | pull policy for the image possible values Always, Never, IfNotPresent (default: Always) | -| manager.image.tag | string | `"latest"` | controller-manager image tag | -| manager.livenessProbe | object | `{}` | Liveness probe in Probe format | -| manager.readinessProbe | object | `{}` | Readiness probe in Probe format | -| manager.resources | object | `{"limits":{"cpu":"100m","memory":"100Mi"},"requests":{"cpu":"100m","memory":"50Mi"}}` | controller-manager Resources definition | -| manager.resources.limits.cpu | string | `"100m"` | controller-manager CPU limit (Max) | -| manager.resources.limits.memory | string | `"100Mi"` | controller-manager Memory limit (Max) | -| manager.resources.requests.cpu | string | `"100m"` | controller-manager CPU request (Min) | -| manager.resources.requests.memory | string | `"50Mi"` | controller-manager Memory request 
(Min) | -| manager.serviceAccount | string | `"k6-operator-controller"` | kubernetes service account for the manager | -| namespace.create | bool | `true` | create the namespace (default: true) | -| prometheus.enabled | bool | `false` | enables the prometheus metrics scraping (default: false) | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.5.0](https://github.com/norwoodj/helm-docs/releases/v1.5.0) \ No newline at end of file +**Do not use this directory until implementation is complete.** diff --git a/deploy/tests/README.md b/deploy/tests/README.md new file mode 100644 index 0000000..b52afe9 --- /dev/null +++ b/deploy/tests/README.md @@ -0,0 +1,20 @@ +# ⚠️ Tests - Not Yet Implemented + +**This directory is not yet implemented and should not be used.** + +The tests Helm chart is present in this directory but is not integrated into the deployment scripts. Do not attempt to deploy tests until implementation is complete. + +## Status + +- [ ] Deployment script not created +- [ ] Test scripts not integrated +- [ ] Integration with k6 not configured +- [ ] Documentation not written +- [ ] Testing not completed + +## Future Implementation + +When implemented, this chart will deploy test scripts (short.js and long.js) as ConfigMaps for use with k6 load testing. The deployment is not yet automated. + +**Do not use this directory until implementation is complete.** + diff --git a/deploy/zipkin/README.md b/deploy/zipkin/README.md new file mode 100644 index 0000000..3087390 --- /dev/null +++ b/deploy/zipkin/README.md @@ -0,0 +1,19 @@ +# ⚠️ Zipkin - Not Yet Implemented + +**This directory is not yet implemented and should not be used.** + +The Zipkin Helm chart is present in this directory but is not integrated into the deployment scripts. Do not attempt to deploy Zipkin until implementation is complete. + +## Status + +- [ ] Deployment script not created +- [ ] Integration with tracing not configured +- [ ] Documentation not written +- [ ] Testing not completed + +## Future Implementation + +When implemented, Zipkin will be used for distributed tracing across the supergraph. The collector service references Zipkin in its configuration, but the deployment is not yet automated. + +**Do not use this directory until implementation is complete.** + diff --git a/scripts/minikube/08-apply-router-config.sh b/scripts/minikube/08-apply-router-config.sh index a50a603..986ccb5 100755 --- a/scripts/minikube/08-apply-router-config.sh +++ b/scripts/minikube/08-apply-router-config.sh @@ -71,180 +71,63 @@ fi # Patch the router deployment to mount the ConfigMap and use it echo "Patching router deployment to use ConfigMap..." -# Check if operator's ConfigMap volume exists and replace it with ours -# The operator creates a volume that points to a ConfigMap with name pattern reference-architecture-*-config-* +# IMPORTANT: We do NOT modify the operator's ConfigMap (it contains the supergraph schema). +# Instead, we add our own ConfigMap for router configuration alongside the operator's ConfigMap. +# The operator's ConfigMap is managed by the operator and should not be modified directly. 
+ +# Check if operator's ConfigMap volume exists (for informational purposes) VOLUMES_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes}' || echo "[]") -VOLUME_INDEX=-1 -INDEX=0 OPERATOR_CONFIGMAP_FOUND=false -VOLUME_NAME="" +OPERATOR_VOLUME_NAME="" # Check each volume to see if it points to the operator's ConfigMap for vol_json in $(echo "$VOLUMES_JSON" | jq -c '.[]'); do CONFIGMAP_NAME=$(echo "$vol_json" | jq -r '.configMap.name // ""') if [[ -n "$CONFIGMAP_NAME" && "$CONFIGMAP_NAME" =~ ^reference-architecture.*-config- ]]; then - VOLUME_INDEX=$INDEX OPERATOR_CONFIGMAP_FOUND=true - VOLUME_NAME=$(echo "$vol_json" | jq -r '.name') - echo " Found operator ConfigMap volume '$VOLUME_NAME' pointing to '$CONFIGMAP_NAME'" + OPERATOR_VOLUME_NAME=$(echo "$vol_json" | jq -r '.name') + echo " Found operator ConfigMap volume '$OPERATOR_VOLUME_NAME' pointing to '$CONFIGMAP_NAME' (keeping it intact)" break fi - INDEX=$((INDEX + 1)) done -if [[ "$OPERATOR_CONFIGMAP_FOUND" == "true" ]]; then - echo " Replacing with our router-config ConfigMap..." - - if [[ $VOLUME_INDEX -ge 0 ]]; then - # Replace the operator's ConfigMap volume with ours - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ - { - \"op\": \"replace\", - \"path\": \"/spec/template/spec/volumes/$VOLUME_INDEX\", - \"value\": { - \"name\": \"router-config\", - \"configMap\": { - \"name\": \"router-config\" - } +# Add our router-config volume (don't replace the operator's) +if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes[*].name}' | grep -q "router-config"; then + echo " Adding router-config volume (alongside operator's ConfigMap)..." + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/volumes/-", + "value": { + "name": "router-config", + "configMap": { + "name": "router-config" } } - ]" && echo " Replaced operator ConfigMap volume with router-config" || { - echo " Warning: Failed to replace volume, trying add instead..." - # Fallback: add our volume (will have both, but ours will be used if mounted) - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/volumes/-", - "value": { - "name": "router-config", - "configMap": { - "name": "router-config" - } - } - } - ]' } - else - # Couldn't find index, just add ours - echo " Could not find operator volume index, adding router-config volume..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/volumes/-", - "value": { - "name": "router-config", - "configMap": { - "name": "router-config" - } - } - } - ]' - fi + ]' + echo " Added router-config volume" else - # No operator volume found, check if our volume exists - if ! 
kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes[*].name}' | grep -q "router-config"; then - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/volumes/-", - "value": { - "name": "router-config", - "configMap": { - "name": "router-config" - } - } - } - ]' - echo " Added router-config volume" - else - echo " router-config volume already exists" - fi + echo " router-config volume already exists" fi -# Check if volumeMount already exists, if not add it -# Also check if operator's volumeMount exists and replace it -MOUNTS_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts}' || echo "[]") -MOUNT_INDEX=-1 -INDEX=0 -OPERATOR_MOUNT_FOUND=false - -# Find volumeMount that matches the operator's volume name or has wrong mount path -# Also check if there's a mount at /app (operator's default path) that needs replacing -for mount_json in $(echo "$MOUNTS_JSON" | jq -c '.[]'); do - MOUNT_NAME=$(echo "$mount_json" | jq -r '.name') - MOUNT_PATH=$(echo "$mount_json" | jq -r '.mountPath') - # Check if this mount points to the operator's volume name, or if it's mounted at /app (operator's default) - if [[ -n "$VOLUME_NAME" && "$MOUNT_NAME" == "$VOLUME_NAME" ]] || [[ "$MOUNT_PATH" == "/app" ]]; then - MOUNT_INDEX=$INDEX - OPERATOR_MOUNT_FOUND=true - echo " Found volumeMount '$MOUNT_NAME' at path '$MOUNT_PATH'" - break - fi - INDEX=$((INDEX + 1)) -done - -if [[ "$OPERATOR_MOUNT_FOUND" == "true" ]]; then - echo " Replacing with router-config volumeMount at /etc/router..." - - if [[ $MOUNT_INDEX -ge 0 ]]; then - # Replace the operator's volumeMount with ours - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ - { - \"op\": \"replace\", - \"path\": \"/spec/template/spec/containers/0/volumeMounts/$MOUNT_INDEX\", - \"value\": { - \"name\": \"router-config\", - \"mountPath\": \"/etc/router\", - \"readOnly\": true - } +# Add volumeMount for router-config (keep operator's mount intact) +echo "Adding router-config volumeMount..." +if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[*].name}' | grep -q "router-config"; then + kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/volumeMounts/-", + "value": { + "name": "router-config", + "mountPath": "/etc/router", + "readOnly": true } - ]" && echo " Replaced operator volumeMount with router-config" || { - echo " Warning: Failed to replace volumeMount, trying add instead..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/volumeMounts/-", - "value": { - "name": "router-config", - "mountPath": "/etc/router", - "readOnly": true - } - } - ]' } - else - # Couldn't find index, just add ours - echo " Could not find operator mount index, adding router-config volumeMount..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/volumeMounts/-", - "value": { - "name": "router-config", - "mountPath": "/etc/router", - "readOnly": true - } - } - ]' - fi + ]' + echo " Added router-config volumeMount at /etc/router" else - # No operator mount found, check if our mount exists - if ! 
kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[*].name}' | grep -q "router-config"; then - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/volumeMounts/-", - "value": { - "name": "router-config", - "mountPath": "/etc/router", - "readOnly": true - } - } - ]' - echo " Added router-config volumeMount" - else - echo " router-config volumeMount already exists" - fi + echo " router-config volumeMount already exists" fi # Add Rhai scripts volume and volumeMount @@ -404,6 +287,9 @@ echo " ConfigMap: router-config (contains router.yaml)" echo " Mounted at: /etc/router/router.yaml" echo " Router args: --config /etc/router/router.yaml" echo "" +echo "Note: The operator's ConfigMap (containing the supergraph schema) is kept intact." +echo " Both ConfigMaps are mounted separately - operator's for schema, ours for config." +echo "" echo "Rhai scripts have been mounted:" echo " ConfigMap: rhai-scripts (contains main.rhai)" echo " Mounted at: /etc/rhai" From aa9aedc36f739ace437a0d8b3bd0e40adcaa1dba Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Tue, 11 Nov 2025 13:41:01 -0800 Subject: [PATCH 25/31] Refactor router configuration management: Transition to declarative configuration via Supergraph CRD, eliminating the need for manual patching and separate ConfigMaps. Update documentation to reflect new configuration methods and streamline deployment processes. Remove deprecated scripts related to router configuration and enhance clarity on updating and troubleshooting router settings. --- README.md | 3 +- .../ROUTER_CONFIG_MIGRATION.md | 169 +++------ deploy/operator-resources/rhai/main.rhai | 333 ------------------ deploy/operator-resources/router-config.yaml | 4 - deploy/operator-resources/supergraph-dev.yaml | 37 ++ .../operator-resources/supergraph-prod.yaml | 37 ++ docs/operator-guide.md | 18 +- docs/setup.md | 123 ++----- .../minikube/07-deploy-operator-resources.sh | 25 +- scripts/minikube/08-apply-router-config.sh | 304 ---------------- ...deploy-ingress.sh => 08-deploy-ingress.sh} | 8 +- ...0-deploy-client.sh => 09-deploy-client.sh} | 6 +- 12 files changed, 182 insertions(+), 885 deletions(-) delete mode 100644 deploy/operator-resources/rhai/main.rhai delete mode 100755 scripts/minikube/08-apply-router-config.sh rename scripts/minikube/{09-deploy-ingress.sh => 08-deploy-ingress.sh} (97%) rename scripts/minikube/{10-deploy-client.sh => 09-deploy-client.sh} (96%) diff --git a/README.md b/README.md index 2932015..3a69c56 100644 --- a/README.md +++ b/README.md @@ -5,8 +5,7 @@ This repository contains a reference architecture utilizing [Kubernetes](https:/ Once the architecture is fully stood up, you'll have: - An Apollo Router running and managed by the [Apollo GraphOS Operator](https://www.apollographql.com/docs/apollo-operator/), utilizing: - - [A coprocessor for handling customizations outside of the router](https://www.apollographql.com/docs/router/customizations/coprocessor) (tracked, not deployed yet) - - [Rhai scripts to do basic customizations within the router container](https://www.apollographql.com/docs/router/customizations/rhai) (tracked, not deployed yet) + - [A coprocessor for handling customizations outside of the router](https://www.apollographql.com/docs/router/customizations/coprocessor) - [Authorization/Authentication directives](https://www.apollographql.com/docs/router/configuration/authorization) - Eight subgraphs, each 
handling a portion of the overall supergraph schema, with schemas automatically published to GraphOS via the operator using inline SDL - A React-based frontend application utilizing Apollo Client (optional) diff --git a/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md b/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md index 1b263c3..e265a09 100644 --- a/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md +++ b/deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md @@ -1,6 +1,6 @@ -# Router Configuration TODO List +# Router Configuration Status -This document tracks the migration of router configuration from Helm values to operator-managed Supergraph CRDs and the current implementation status. +This document tracks the router configuration implementation using the Apollo GraphOS Operator. ## ✅ Completed Tasks @@ -8,11 +8,9 @@ This document tracks the migration of router configuration from Helm values to o - [x] **Environment Variants**: Dev and prod variants created - [x] **Subgraphs Deployment**: All subgraphs deployed with CRDs using inline SDL - [x] **Operator Installation**: Apollo GraphOS Operator installed and configured -- [x] **Router Configuration ConfigMap**: Created `router-config` ConfigMap with custom router settings -- [x] **Rhai Scripts ConfigMap**: Created `rhai-scripts` ConfigMap with logging scripts -- [x] **Router Deployment Patching**: Implemented script to patch router deployment with ConfigMap volumes and args +- [x] **Router Configuration**: Router config now handled via `spec.routerConfig` in Supergraph CRD ✅ - [x] **Coprocessor Deployment**: Coprocessor deployed and configured for JWT authentication -- [x] **Router Log Level**: Set to debug via `APOLLO_ROUTER_LOG` environment variable in Supergraph CRD podTemplate +- [x] **Router Log Level**: Set via `APOLLO_ROUTER_LOG` environment variable in Supergraph CRD podTemplate - [x] **Ingress Configuration**: Ingress set up for external access via minikube tunnel - [x] **Client Application**: Client deployed with nginx proxying GraphQL requests @@ -20,40 +18,29 @@ This document tracks the migration of router configuration from Helm values to o ### Router Configuration Method -The router configuration is currently implemented using a **hybrid approach**: +The router configuration is now implemented using the **native operator approach**: 1. **Supergraph CRD**: Managed by Apollo GraphOS Operator - Schema composition and publishing - Basic deployment configuration (replicas, resources, version) - Environment variables (e.g., `APOLLO_ROUTER_LOG=debug`) + - **Router configuration via `spec.routerConfig`** ✅ - Schema source reference -2. 
**ConfigMap + Manual Patching**: Custom router configuration - - Router configuration YAML (`router-config.yaml`) mounted via ConfigMap - - Rhai scripts mounted via ConfigMap - - Deployment patched manually after operator creates it - - Script: `scripts/minikube/08-apply-router-config.sh` - ### Configuration Files | Configuration | Location | Status | |--------------|----------|--------| -| Router Config | `deploy/operator-resources/router-config.yaml` | ✅ Implemented via ConfigMap | -| Rhai Scripts | `deploy/operator-resources/rhai/main.rhai` | ✅ Implemented via ConfigMap | +| Router Config | `deploy/operator-resources/supergraph-{dev\|prod}.yaml` (spec.routerConfig) | ✅ Native operator support | | SupergraphSchema | Created by `07-deploy-operator-resources.sh` | ✅ Operator-managed | -| Supergraph | Created by `07-deploy-operator-resources.sh` | ✅ Operator-managed (patched) | +| Supergraph | Created by `07-deploy-operator-resources.sh` | ✅ Operator-managed | ## 📋 Configuration Details -### Log Level Configuration - -- ✅ Set via `APOLLO_ROUTER_LOG=debug` environment variable in Supergraph CRD `podTemplate.env` -- ✅ Configured declaratively in the CRD (no patching needed) -- ✅ Applied automatically by the operator when creating/updating the deployment +### Router Configuration (`spec.routerConfig`) -### Router Configuration (`router-config.yaml`) +Router configuration is now defined directly in the Supergraph CRD via `spec.routerConfig`. Current configuration includes: -Current configuration includes: - ✅ Supergraph listen port (4000) - ✅ Introspection enabled - ✅ Headers propagation @@ -63,15 +50,27 @@ Current configuration includes: - ✅ Coprocessor configuration - ✅ Health check endpoint (8088) - ✅ Sandbox enabled -- ✅ Rhai scripts configuration +- ✅ Homepage disabled + +### Updating Router Configuration -### Rhai Scripts +To update router configuration: + +1. **Edit the Supergraph resource:** + ```bash + # Edit the router configuration + vim deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml + ``` + +2. **Apply the changes:** + ```bash + kubectl apply -f deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml + ``` -- ✅ Logging at all router lifecycle stages: - - RouterService (HTTP request/response) - - SupergraphService (GraphQL request/response) - - ExecutionService (Query plan execution) - - SubgraphService (Subgraph communication) +3. **The operator will automatically:** + - Update the router deployment with the new configuration + - Roll out the changes to all router pods + - No manual patching required! ### Coprocessor @@ -79,113 +78,42 @@ Current configuration includes: - ✅ Adds "source" header to subgraph requests - ✅ JWT validation handled by router's built-in authentication plugin -## ⚠️ Known Limitations +## ✅ Operator CRD Support -### Operator CRD Limitations - -The Apollo GraphOS Operator CRD does not natively support: -- ❌ Custom router configuration YAML in Supergraph CRD -- ❌ ConfigMap volumes for router configuration -- ❌ Custom container args (like `--config`) -- ❌ Rhai scripts via ConfigMap volumes - -**Note**: Environment variables (like `APOLLO_ROUTER_LOG`) are supported via `podTemplate.env` ✅ - -**Workaround**: We patch the deployment manually after the operator creates it for unsupported features. - -### Current Workarounds - -1. 
**Router Configuration**: - - Created as ConfigMap (`router-config`) - - Mounted via volume at `/etc/router` - - Referenced via `--config /etc/router/router.yaml` argument - - Applied via `scripts/minikube/08-apply-router-config.sh` - -2. **Rhai Scripts**: - - Created as ConfigMap (`rhai-scripts`) - - Mounted via volume at `/etc/rhai` - - Referenced in router config YAML - - Applied via `scripts/minikube/08-apply-router-config.sh` +The Apollo GraphOS Operator CRD now supports: +- ✅ Custom router configuration YAML via `spec.routerConfig` ✅ +- ✅ Environment variables via `podTemplate.env` ✅ +- ✅ Basic deployment configuration ✅ ## 🔧 Maintenance Tasks ### When Updating Router Configuration -1. Edit `deploy/operator-resources/router-config.yaml` -2. Update the ConfigMap: - ```bash - kubectl create configmap router-config \ - --from-file=router.yaml=deploy/operator-resources/router-config.yaml \ - -n apollo --dry-run=client -o yaml | kubectl apply -f - - ``` -3. Restart router deployment: - ```bash - kubectl rollout restart deployment/reference-architecture-${ENVIRONMENT} -n apollo - ``` - -### When Updating Rhai Scripts - -1. Edit `deploy/operator-resources/rhai/main.rhai` -2. Update the ConfigMap: - ```bash - kubectl create configmap rhai-scripts \ - --from-file=main.rhai=deploy/operator-resources/rhai/main.rhai \ - -n apollo --dry-run=client -o yaml | kubectl apply -f - - ``` -3. Restart router deployment: - ```bash - kubectl rollout restart deployment/reference-architecture-${ENVIRONMENT} -n apollo - ``` - -## 🚀 Future Improvements - -### Potential Enhancements - -- [ ] **Automate ConfigMap Updates**: Create a script to update ConfigMaps and restart deployments -- [ ] **Configuration Validation**: Add validation for router-config.yaml before applying -- [ ] **Environment-Specific Configs**: Support different router configs per environment -- [ ] **Telemetry Configuration**: Add OTLP tracing/metrics configuration (if needed) -- [ ] **Persisted Queries**: Configure persisted queries for production (if needed) -- [ ] **Operator Support**: Monitor Apollo GraphOS Operator updates for native support of: - - Custom router configuration - - ConfigMap volumes - - Container args - - Rhai scripts - +1. Edit `deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml` +2. Update the `spec.routerConfig` section +3. Apply: `kubectl apply -f deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml` +4. The operator handles the rest automatically! ## 🐛 Troubleshooting ### Router Not Picking Up Configuration Changes -1. Verify ConfigMap exists: - ```bash - kubectl get configmap router-config -n apollo - kubectl get configmap rhai-scripts -n apollo - ``` - -2. Check volume mounts: +1. Verify Supergraph resource has routerConfig: ```bash - kubectl describe deployment reference-architecture-${ENVIRONMENT} -n apollo | grep -A 10 "Volumes:" - kubectl describe pod -n apollo | grep -A 10 "Mounts:" + kubectl get supergraph reference-architecture-${ENVIRONMENT} -n apollo -o yaml | grep -A 50 routerConfig ``` -3. Verify container args: +2. Check router deployment status: ```bash - kubectl get deployment reference-architecture-${ENVIRONMENT} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args}' + kubectl get deployment reference-architecture-${ENVIRONMENT} -n apollo + kubectl rollout status deployment/reference-architecture-${ENVIRONMENT} -n apollo ``` -4. Check router logs: +3. 
Check router logs: ```bash kubectl logs -n apollo deployment/reference-architecture-${ENVIRONMENT} -f ``` -### Rhai Script Errors - -1. Check Rhai script syntax (Rhai doesn't support `in` operator) -2. Verify ConfigMap is mounted at `/etc/rhai` -3. Check router logs for Rhai execution errors -4. Ensure router config references Rhai scripts correctly - ### Coprocessor Issues 1. Verify coprocessor is running: @@ -200,12 +128,11 @@ The Apollo GraphOS Operator CRD does not natively support: 3. Verify router config has correct coprocessor URL: ```bash - kubectl get configmap router-config -n apollo -o yaml | grep coprocessor + kubectl get supergraph reference-architecture-${ENVIRONMENT} -n apollo -o yaml | grep coprocessor ``` ## 📝 Notes -- The manual patching approach is necessary because the Apollo GraphOS Operator CRD doesn't support all router configuration options -- Router configuration changes require restarting the deployment (not just updating ConfigMap) -- The `08-apply-router-config.sh` script handles all patching logic automatically -- Debug logging is enabled by default via `--log=debug` argument +- Router configuration is now fully declarative via the Supergraph CRD +- No manual patching required for router configuration +- Configuration changes are automatically applied by the operator diff --git a/deploy/operator-resources/rhai/main.rhai b/deploy/operator-resources/rhai/main.rhai deleted file mode 100644 index 403cbd4..0000000 --- a/deploy/operator-resources/rhai/main.rhai +++ /dev/null @@ -1,333 +0,0 @@ -// Rhai script for logging at all router lifecycle stages -// This script logs requests and responses at every stage to help debug request flow - -fn router_service(service) { - // Router Service: Beginning and end of HTTP request lifecycle - let request_callback = |request| { - log_info("=== Router Service: Request ==="); - log_info(`HTTP Method: ${request.method}`); - log_info(`HTTP Path: ${request.uri.path}`); - log_info(`Request ID: ${request.id}`); - - // Log headers - try { - log_debug("Request Headers:"); - for key in request.headers.keys() { - log_debug(` ${key}: ${request.headers[key]}`); - } - } catch(err) { - log_debug(`Could not log headers: ${err}`); - } - - // Try to get trace ID - try { - let trace_id = traceid(); - log_info(`Trace ID: ${trace_id}`); - } catch(err) { - log_debug(`Trace ID not available: ${err}`); - } - }; - - let response_callback = |response| { - log_info("=== Router Service: Response ==="); - - // Check if this is the primary response (status_code only available on primary) - if response.is_primary() { - log_info(`HTTP Status: ${response.status_code.to_string()}`); - - // Log response headers - try { - log_debug("Response Headers:"); - for key in response.headers.keys() { - log_debug(` ${key}: ${response.headers[key]}`); - } - } catch(err) { - log_debug(`Could not log headers: ${err}`); - } - } else { - log_debug("Non-primary response (deferred stream)"); - } - - log_info(`Response ID: ${response.id}`); - }; - - service.map_request(request_callback); - service.map_response(response_callback); -} - -fn supergraph_service(service) { - // Supergraph Service: Beginning and end of GraphQL request lifecycle - let request_callback = |request| { - log_info("=== Supergraph Service: Request ==="); - - // Log GraphQL operation details - try { - if request.body != () { - try { - if request.body.query != () { - log_info(`GraphQL Query: ${request.body.query}`); - } - } catch(err) { - log_debug(`Query not available: ${err}`); - } - try { - if 
request.body.operation_name != () { - log_info(`Operation Name: ${request.body.operation_name}`); - } - } catch(err) { - log_debug(`Operation name not available: ${err}`); - } - try { - if request.body.variables != () { - log_debug(`Variables: ${request.body.variables}`); - } - } catch(err) { - log_debug(`Variables not available: ${err}`); - } - } - } catch(err) { - log_debug(`Body not available: ${err}`); - } - - // Log context - try { - if request.context != () { - log_debug("Request Context available"); - } - } catch(err) { - log_debug(`Context not available: ${err}`); - } - - // Try to get trace ID - try { - let trace_id = traceid(); - log_info(`Trace ID: ${trace_id}`); - } catch(err) { - log_debug(`Trace ID not available: ${err}`); - } - }; - - let response_callback = |response| { - log_info("=== Supergraph Service: Response ==="); - - // Log GraphQL response details - try { - if response.body != () { - try { - if response.body.data != () { - log_debug("Response contains data"); - } - } catch(err) { - log_debug(`Data not available: ${err}`); - } - try { - if response.body.errors != () { - log_warn(`GraphQL Errors: ${response.body.errors}`); - } - } catch(err) { - log_debug(`Errors not available: ${err}`); - } - try { - if response.body.extensions != () { - log_debug(`Extensions: ${response.body.extensions}`); - } - } catch(err) { - log_debug(`Extensions not available: ${err}`); - } - } - } catch(err) { - log_debug(`Body not available: ${err}`); - } - - // Note: status_code is not available on SupergraphService responses - // HTTP status is only available on RouterService responses - }; - - service.map_request(request_callback); - service.map_response(response_callback); -} - -fn execution_service(service) { - // Execution Service: Query plan execution - let request_callback = |request| { - log_info("=== Execution Service: Request ==="); - log_info("Query plan execution initiated"); - - // Log query plan details if available - try { - if request.query_plan != () { - log_debug("Query plan available"); - } - } catch(err) { - log_debug(`Query plan not available: ${err}`); - } - - // Log operation details - try { - if request.body != () { - try { - if request.body.query != () { - log_debug(`Executing query: ${request.body.query}`); - } - } catch(err) { - log_debug(`Query not available: ${err}`); - } - try { - if request.body.operation_name != () { - log_info(`Operation: ${request.body.operation_name}`); - } - } catch(err) { - log_debug(`Operation name not available: ${err}`); - } - } - } catch(err) { - log_debug(`Body not available: ${err}`); - } - - // Try to get trace ID - try { - let trace_id = traceid(); - log_info(`Trace ID: ${trace_id}`); - } catch(err) { - log_debug(`Trace ID not available: ${err}`); - } - }; - - let response_callback = |response| { - log_info("=== Execution Service: Response ==="); - log_info("Query plan execution completed"); - - // Log execution results - try { - if response.body != () { - try { - if response.body.data != () { - log_debug("Execution returned data"); - } - } catch(err) { - log_debug(`Data not available: ${err}`); - } - try { - if response.body.errors != () { - log_warn(`Execution errors: ${response.body.errors}`); - } - } catch(err) { - log_debug(`Errors not available: ${err}`); - } - } - } catch(err) { - log_debug(`Body not available: ${err}`); - } - }; - - service.map_request(request_callback); - service.map_response(response_callback); -} - -fn subgraph_service(service, subgraph) { - // Subgraph Service: Communication with individual subgraphs 
- let request_callback = |request| { - log_info(`=== Subgraph Service: Request to ${subgraph} ===`); - log_info(`Subgraph: ${subgraph}`); - - // Log subgraph request details - // For subgraph_service, request.subgraph.* contains the subgraph request details - log_info(`Subgraph URI Scheme: ${request.subgraph.uri.scheme}`); - log_info(`Subgraph URI Host: ${request.subgraph.uri.host}`); - if request.subgraph.uri.port != () { - log_info(`Subgraph URI Port: ${request.subgraph.uri.port}`); - } - log_info(`Subgraph URI Path: ${request.subgraph.uri.path}`); - - // Log subgraph operation details - try { - if request.subgraph.body != () { - try { - if request.subgraph.body.query != () { - log_debug(`Subgraph Query: ${request.subgraph.body.query}`); - } - } catch(err) { - log_debug(`Subgraph query not available: ${err}`); - } - try { - if request.subgraph.body.operation_name != () { - log_debug(`Subgraph Operation: ${request.subgraph.body.operation_name}`); - } - } catch(err) { - log_debug(`Subgraph operation name not available: ${err}`); - } - try { - if request.subgraph.body.variables != () { - log_debug(`Subgraph Variables: ${request.subgraph.body.variables}`); - } - } catch(err) { - log_debug(`Subgraph variables not available: ${err}`); - } - } - } catch(err) { - log_debug(`Subgraph body not available: ${err}`); - } - - // Log headers being sent to subgraph - try { - log_debug(`Headers sent to ${subgraph}:`); - for key in request.subgraph.headers.keys() { - log_debug(` ${key}: ${request.subgraph.headers[key]}`); - } - } catch(err) { - log_debug(`Could not log subgraph request headers: ${err}`); - } - - // Try to get trace ID - try { - let trace_id = traceid(); - log_info(`Trace ID: ${trace_id}`); - } catch(err) { - log_debug(`Trace ID not available: ${err}`); - } - }; - - let response_callback = |response| { - log_info(`=== Subgraph Service: Response from ${subgraph} ===`); - log_info(`Subgraph: ${subgraph}`); - - // Log subgraph response status - log_info(`HTTP Status: ${response.status_code.to_string()}`); - - // Log subgraph response details - try { - if response.body != () { - try { - if response.body.data != () { - log_debug(`Response from ${subgraph} contains data`); - } - } catch(err) { - log_debug(`Data not available: ${err}`); - } - try { - if response.body.errors != () { - log_warn(`Errors from ${subgraph}: ${response.body.errors}`); - } - } catch(err) { - log_debug(`Errors not available: ${err}`); - } - } - } catch(err) { - log_debug(`Body not available: ${err}`); - } - - // Log headers received from subgraph - try { - log_debug(`Headers received from ${subgraph}:`); - for key in response.headers.keys() { - log_debug(` ${key}: ${response.headers[key]}`); - } - } catch(err) { - log_debug(`Could not log subgraph response headers: ${err}`); - } - }; - - service.map_request(request_callback); - service.map_response(response_callback); -} - diff --git a/deploy/operator-resources/router-config.yaml b/deploy/operator-resources/router-config.yaml index 2fccd5b..97c1385 100644 --- a/deploy/operator-resources/router-config.yaml +++ b/deploy/operator-resources/router-config.yaml @@ -54,7 +54,3 @@ sandbox: homepage: enabled: false -rhai: - scripts: "/etc/rhai" - main: "main.rhai" - diff --git a/deploy/operator-resources/supergraph-dev.yaml b/deploy/operator-resources/supergraph-dev.yaml index 2d310a0..ac25f5b 100644 --- a/deploy/operator-resources/supergraph-dev.yaml +++ b/deploy/operator-resources/supergraph-dev.yaml @@ -18,4 +18,41 @@ spec: resource: name: reference-architecture-dev namespace: 
apollo + routerConfig: + supergraph: + listen: 0.0.0.0:4000 + introspection: true + headers: + all: + request: + - propagate: + matching: .* + authentication: + router: + jwt: + jwks: + - url: http://graphql.users.svc.cluster.local:4001/.well-known/jwks.json + authorization: + directives: + enabled: true + cors: + allow_any_origin: true + coprocessor: + url: http://coprocessor.apollo.svc.cluster.local:8081 + timeout: 2s + router: + request: + headers: true + subgraph: + all: + request: + headers: true + response: + headers: true + health_check: + listen: 0.0.0.0:8088 + sandbox: + enabled: true + homepage: + enabled: false diff --git a/deploy/operator-resources/supergraph-prod.yaml b/deploy/operator-resources/supergraph-prod.yaml index 641b9aa..59841e9 100644 --- a/deploy/operator-resources/supergraph-prod.yaml +++ b/deploy/operator-resources/supergraph-prod.yaml @@ -18,4 +18,41 @@ spec: resource: name: reference-architecture-prod namespace: apollo + routerConfig: + supergraph: + listen: 0.0.0.0:4000 + introspection: true + headers: + all: + request: + - propagate: + matching: .* + authentication: + router: + jwt: + jwks: + - url: http://graphql.users.svc.cluster.local:4001/.well-known/jwks.json + authorization: + directives: + enabled: true + cors: + allow_any_origin: true + coprocessor: + url: http://coprocessor.apollo.svc.cluster.local:8081 + timeout: 2s + router: + request: + headers: true + subgraph: + all: + request: + headers: true + response: + headers: true + health_check: + listen: 0.0.0.0:8088 + sandbox: + enabled: true + homepage: + enabled: false diff --git a/docs/operator-guide.md b/docs/operator-guide.md index 73b1557..6da1553 100644 --- a/docs/operator-guide.md +++ b/docs/operator-guide.md @@ -234,19 +234,21 @@ kubectl logs -n apollo deployment/reference-architecture-{dev|prod} ## Updating Router Configuration -**Note:** The router configuration is managed separately from the Supergraph CRD. See [Updating Router Configuration](../docs/setup.md#step-5-updating-router-configuration) in the setup guide for details. +Router configuration is now handled directly via `spec.routerConfig` in the Supergraph CRD. See [Updating Router Configuration](../docs/setup.md#step-5-updating-router-configuration) in the setup guide for details. -The Apollo GraphOS Operator's `Supergraph` CRD does not support custom router configuration YAML directly. Instead, we use a hybrid approach: +The Apollo GraphOS Operator's `Supergraph` CRD supports router configuration natively via `spec.routerConfig`: -1. The operator creates the router deployment with basic settings -2. Script 08 patches the deployment to mount custom configuration via ConfigMap -3. Configuration updates require updating the ConfigMap and restarting the deployment +1. Edit `deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml` +2. Update the `spec.routerConfig` section +3. Apply: `kubectl apply -f deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml` +4. The operator automatically updates the router deployment and rolls out changes To update router configuration without changing subgraphs: -1. Edit `deploy/operator-resources/router-config.yaml` -2. Update the ConfigMap: `kubectl create configmap router-config --from-file=router.yaml=deploy/operator-resources/router-config.yaml -n apollo --dry-run=client -o yaml | kubectl apply -f -` -3. Restart the deployment: `kubectl rollout restart deployment/reference-architecture-${ENVIRONMENT} -n apollo` +1. Edit `deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml` +2. 
Update the `spec.routerConfig` section with your desired router settings +3. Apply: `kubectl apply -f deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml` +4. The operator handles the rollout automatically - no manual patching or restarts needed! For more details, see the [Router Configuration Migration Guide](../deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md). diff --git a/docs/setup.md b/docs/setup.md index 9170b8e..ca6afa7 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -158,9 +158,8 @@ This script: ``` This script: -- Creates the router-config ConfigMap - Deploys SupergraphSchema CRD (triggers composition) -- Deploys Supergraph CRD (deploys the Apollo Router) +- Deploys Supergraph CRD with router configuration (deploys the Apollo Router) - Waits for the router deployment to be created **Note:** The coprocessor (script 06) must be deployed before running this script. @@ -173,23 +172,10 @@ kubectl get pods -n apollo kubectl describe supergraph reference-architecture-${ENVIRONMENT} -n apollo ``` -### Script 08: Apply Router Configuration +### Script 08: Deploy Ingress ```bash -./scripts/minikube/08-apply-router-config.sh -``` - -This script: -- Patches the router deployment to mount the router-config ConfigMap -- Configures the router to use custom settings (coprocessor, CORS, etc.) -- Waits for the router rollout to complete - -**Note:** Script 07 must be run first to create the Supergraph and ConfigMap. - -### Script 09: Deploy Ingress - -```bash -./scripts/minikube/09-deploy-ingress.sh +./scripts/minikube/08-deploy-ingress.sh ``` This script: @@ -197,10 +183,10 @@ This script: - Configures the ingress controller as LoadBalancer for `minikube tunnel` support - Provides access URLs for the router -### Script 10: Deploy Client (Optional) +### Script 09: Deploy Client (Optional) ```bash -./scripts/minikube/10-deploy-client.sh +./scripts/minikube/09-deploy-client.sh ``` This script: @@ -272,78 +258,51 @@ curl http://localhost:4000/health ## Step 5: Updating Router Configuration -The router configuration is stored in `deploy/operator-resources/router-config.yaml`. To update the router configuration: +Router configuration is now handled directly in the Supergraph CRD via `spec.routerConfig`. To update the router configuration: -### Why We Patch the Deployment +### Router Configuration via Supergraph CRD -The Apollo GraphOS Operator's `Supergraph` CRD does not natively support custom router configuration YAML, ConfigMap volumes, or custom container arguments. Therefore, we use a **hybrid approach**: +The Apollo GraphOS Operator supports router configuration natively via the `spec.routerConfig` property in the Supergraph CRD. This means: -1. The operator creates the router deployment with basic settings -2. Script 08 (`08-apply-router-config.sh`) patches the deployment to: - - Mount the `router-config` ConfigMap as a volume - - Add `--config /etc/router/router.yaml` argument - - Mount the `rhai-scripts` ConfigMap for custom scripts - - Set log level via environment variable (`APOLLO_ROUTER_LOG`) - -This patching approach is necessary because the operator doesn't support these advanced configuration options directly in the CRD. +1. **No manual patching required** - The operator handles everything automatically +2. **Declarative configuration** - All router settings are in the Supergraph YAML +3. **Automatic rollout** - Changes are applied automatically when you update the CRD ### Updating Router Configuration To update the router configuration: -1. 
**Edit the configuration file:** +1. **Edit the Supergraph resource:** ```bash # Edit the router configuration - vim deploy/operator-resources/router-config.yaml - ``` - -2. **Update the ConfigMap:** - ```bash - kubectl create configmap router-config \ - --from-file=router.yaml=deploy/operator-resources/router-config.yaml \ - -n apollo --dry-run=client -o yaml | kubectl apply -f - + vim deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml ``` -3. **Restart the router deployment:** - ```bash - kubectl rollout restart deployment/reference-architecture-${ENVIRONMENT} -n apollo +2. **Update the `spec.routerConfig` section** with your desired settings: + ```yaml + spec: + routerConfig: + supergraph: + listen: 0.0.0.0:4000 + introspection: true + # ... other router settings ``` -4. **Wait for rollout to complete:** +3. **Apply the changes:** ```bash - kubectl rollout status deployment/reference-architecture-${ENVIRONMENT} -n apollo + kubectl apply -f deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml ``` -### Updating Rhai Scripts - -To update the Rhai scripts: +4. **The operator automatically:** + - Updates the router deployment with the new configuration + - Rolls out the changes to all router pods + - No manual patching or restarts needed! -1. **Edit the script file:** - ```bash - vim deploy/operator-resources/rhai/main.rhai - ``` - -2. **Update the ConfigMap:** - ```bash - kubectl create configmap rhai-scripts \ - --from-file=main.rhai=deploy/operator-resources/rhai/main.rhai \ - -n apollo --dry-run=client -o yaml | kubectl apply -f - - ``` - -3. **Restart the router deployment:** - ```bash - kubectl rollout restart deployment/reference-architecture-${ENVIRONMENT} -n apollo - ``` - -**Note:** If you need to re-apply the router configuration patching (e.g., after operator updates the deployment), you can re-run script 08: - -```bash -./scripts/minikube/08-apply-router-config.sh -``` +**Note:** Router configuration is now managed entirely through the Supergraph CRD. No additional scripts are needed. ## Step 6: Logging Into the Client Application -If you deployed the client application (script 10), you can log in using the following test credentials: +If you deployed the client application (script 09), you can log in using the following test credentials: ### Test Users @@ -381,7 +340,7 @@ To create a new environment (e.g., "prod"): export ENVIRONMENT="prod" ``` -2. Run scripts 02-10 again with the new environment: +2. Run scripts 02-09 again with the new environment: ```bash ./scripts/minikube/02-setup-apollo-graph.sh @@ -391,9 +350,8 @@ source .env ./scripts/minikube/05-deploy-subgraphs.sh ./scripts/minikube/06-deploy-coprocessor.sh ./scripts/minikube/07-deploy-operator-resources.sh -./scripts/minikube/08-apply-router-config.sh -./scripts/minikube/09-deploy-ingress.sh -./scripts/minikube/10-deploy-client.sh +./scripts/minikube/08-deploy-ingress.sh +./scripts/minikube/09-deploy-client.sh ``` Each environment will have: @@ -445,7 +403,7 @@ If the router is not picking up configuration changes: 1. **Verify ConfigMaps exist:** ```bash kubectl get configmap router-config -n apollo - kubectl get configmap rhai-scripts -n apollo + kubectl get supergraph reference-architecture-${ENVIRONMENT} -n apollo -o yaml | grep -A 50 routerConfig ``` 2. **Check volume mounts:** @@ -468,7 +426,7 @@ If the router is not picking up configuration changes: 5. 
**Re-apply router configuration:** ```bash - ./scripts/minikube/08-apply-router-config.sh + kubectl apply -f deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml ``` ### Router pods in CrashLoopBackOff @@ -481,17 +439,14 @@ If router pods are crashing: ``` 2. **Common causes:** - - Invalid YAML in `router-config.yaml` (check syntax) - - Missing ConfigMap (verify ConfigMaps exist) - - Volume mount path incorrect (should be `/etc/router`) - - Configuration file not found (check `--config` argument) + - Invalid YAML in `spec.routerConfig` (check syntax in Supergraph CRD) + - Schema composition issues (check SupergraphSchema status) + - Missing coprocessor (verify coprocessor is running) 3. **Verify configuration syntax:** ```bash - # Check if router-config.yaml is valid YAML - kubectl create configmap router-config \ - --from-file=router.yaml=deploy/operator-resources/router-config.yaml \ - -n apollo --dry-run=client -o yaml | kubectl apply -f - --dry-run=client + # Check if Supergraph CRD has valid routerConfig + kubectl get supergraph reference-architecture-${ENVIRONMENT} -n apollo -o yaml | grep -A 50 routerConfig ``` ### Ingress not working diff --git a/scripts/minikube/07-deploy-operator-resources.sh b/scripts/minikube/07-deploy-operator-resources.sh index 4175146..912bdd2 100755 --- a/scripts/minikube/07-deploy-operator-resources.sh +++ b/scripts/minikube/07-deploy-operator-resources.sh @@ -45,24 +45,6 @@ kubectl create namespace apollo --dry-run=client -o yaml | kubectl apply -f - # Resource name based on environment RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" -# Create router configuration ConfigMap -echo "Creating router configuration ConfigMap..." -kubectl create configmap router-config \ - --from-file=router.yaml=deploy/operator-resources/router-config.yaml \ - -n apollo \ - --dry-run=client -o yaml | kubectl apply -f - - -echo "Router configuration ConfigMap created" - -# Create Rhai scripts ConfigMap -echo "Creating Rhai scripts ConfigMap..." -kubectl create configmap rhai-scripts \ - --from-file=main.rhai=deploy/operator-resources/rhai/main.rhai \ - -n apollo \ - --dry-run=client -o yaml | kubectl apply -f - - -echo "Rhai scripts ConfigMap created" - # Deploy SupergraphSchema echo "Deploying SupergraphSchema..." @@ -83,9 +65,8 @@ echo "SupergraphSchema deployed" echo "Waiting for schema composition..." sleep 5 -# Deploy Supergraph with ConfigMap-mounted router configuration -# The router configuration is loaded from the ConfigMap and mounted as a volume -# The router will use the --config flag to reference the mounted file +# Deploy Supergraph +# Router configuration is now included in the Supergraph CRD via spec.routerConfig echo "Deploying Supergraph..." 
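# (Editorial sketch, not a line of this script: once the Supergraph is applied, the
# routerConfig the operator acts on can be read back from the CRD itself, using the
# same grep the troubleshooting docs suggest; RESOURCE_NAME is defined earlier in
# this script.)
kubectl get supergraph "${RESOURCE_NAME}" -n apollo -o yaml | grep -A 50 routerConfig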
# Check for environment-specific Supergraph file first, then use template @@ -129,5 +110,5 @@ echo "Monitor router status with:" echo " kubectl get supergraphs -n apollo" echo " kubectl get pods -n apollo" echo "" -echo "Next step: Run 08-apply-router-config.sh to configure the router with custom settings" +echo "Next step: Run 08-deploy-ingress.sh to setup external access" diff --git a/scripts/minikube/08-apply-router-config.sh b/scripts/minikube/08-apply-router-config.sh deleted file mode 100755 index 986ccb5..0000000 --- a/scripts/minikube/08-apply-router-config.sh +++ /dev/null @@ -1,304 +0,0 @@ -#!/bin/bash -set -euo pipefail - -# Script 08: Apply Router Configuration -# This script patches the router deployment to use the router-config ConfigMap -# Note: Script 07 must be run first to create the Supergraph and ConfigMap - -echo "=== Step 08: Applying Router Configuration ===" - -# Load environment variables from .env if it exists -if [ -f .env ]; then - echo "Loading environment variables from .env..." - source .env -fi - -# Validate required variables -if [[ -z "${ENVIRONMENT:-}" ]]; then - echo "Error: ENVIRONMENT is required" - echo "Please set ENVIRONMENT in your .env file or export it:" - echo " export ENVIRONMENT=\"dev\"" - exit 1 -fi - -# Check if kubectl is available -if ! command -v kubectl &> /dev/null; then - echo "Error: kubectl is not installed" - exit 1 -fi - -# Verify cluster connection -if ! kubectl cluster-info &> /dev/null; then - echo "Error: Cannot connect to Kubernetes cluster" - exit 1 -fi - -# Resource name based on environment -RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" -DEPLOYMENT_NAME="${RESOURCE_NAME}" - -# Verify ConfigMaps exist -if ! kubectl get configmap router-config -n apollo &>/dev/null; then - echo "Error: router-config ConfigMap not found" - echo "Please run 07-deploy-operator-resources.sh first to create the ConfigMap" - exit 1 -fi - -if ! kubectl get configmap rhai-scripts -n apollo &>/dev/null; then - echo "Error: rhai-scripts ConfigMap not found" - echo "Please run 07-deploy-operator-resources.sh first to create the ConfigMap" - exit 1 -fi - -# Wait for router deployment to be created -echo "Waiting for router deployment to be created..." -for i in {1..60}; do - if kubectl get deployment ${DEPLOYMENT_NAME} -n apollo &>/dev/null; then - echo "Router deployment found" - break - fi - echo " Waiting for deployment... ($i/60)" - sleep 2 -done - -if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo &>/dev/null; then - echo "Error: Router deployment not found after waiting" - echo "Please check the Supergraph status:" - echo " kubectl get supergraph ${RESOURCE_NAME} -n apollo" - exit 1 -fi - -# Patch the router deployment to mount the ConfigMap and use it -echo "Patching router deployment to use ConfigMap..." - -# IMPORTANT: We do NOT modify the operator's ConfigMap (it contains the supergraph schema). -# Instead, we add our own ConfigMap for router configuration alongside the operator's ConfigMap. -# The operator's ConfigMap is managed by the operator and should not be modified directly. 
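# (Editorial aside on the removed approach, not part of the deleted file: the two
# ConfigMaps the comment above distinguishes, the operator-generated schema ConfigMap
# and the hand-managed router-config, could be listed per volume like this;
# DEPLOYMENT_NAME is the variable defined earlier in the deleted script.)
kubectl get deployment "${DEPLOYMENT_NAME}" -n apollo \
  -o jsonpath='{range .spec.template.spec.volumes[*]}{.name}{" -> "}{.configMap.name}{"\n"}{end}'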
- -# Check if operator's ConfigMap volume exists (for informational purposes) -VOLUMES_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes}' || echo "[]") -OPERATOR_CONFIGMAP_FOUND=false -OPERATOR_VOLUME_NAME="" - -# Check each volume to see if it points to the operator's ConfigMap -for vol_json in $(echo "$VOLUMES_JSON" | jq -c '.[]'); do - CONFIGMAP_NAME=$(echo "$vol_json" | jq -r '.configMap.name // ""') - if [[ -n "$CONFIGMAP_NAME" && "$CONFIGMAP_NAME" =~ ^reference-architecture.*-config- ]]; then - OPERATOR_CONFIGMAP_FOUND=true - OPERATOR_VOLUME_NAME=$(echo "$vol_json" | jq -r '.name') - echo " Found operator ConfigMap volume '$OPERATOR_VOLUME_NAME' pointing to '$CONFIGMAP_NAME' (keeping it intact)" - break - fi -done - -# Add our router-config volume (don't replace the operator's) -if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes[*].name}' | grep -q "router-config"; then - echo " Adding router-config volume (alongside operator's ConfigMap)..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/volumes/-", - "value": { - "name": "router-config", - "configMap": { - "name": "router-config" - } - } - } - ]' - echo " Added router-config volume" -else - echo " router-config volume already exists" -fi - -# Add volumeMount for router-config (keep operator's mount intact) -echo "Adding router-config volumeMount..." -if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[*].name}' | grep -q "router-config"; then - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/volumeMounts/-", - "value": { - "name": "router-config", - "mountPath": "/etc/router", - "readOnly": true - } - } - ]' - echo " Added router-config volumeMount at /etc/router" -else - echo " router-config volumeMount already exists" -fi - -# Add Rhai scripts volume and volumeMount -echo "Adding Rhai scripts volume and volumeMount..." - -# Check if rhai-scripts volume exists -if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.volumes[*].name}' | grep -q "rhai-scripts"; then - echo " Adding rhai-scripts volume..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/volumes/-", - "value": { - "name": "rhai-scripts", - "configMap": { - "name": "rhai-scripts" - } - } - } - ]' - echo " Added rhai-scripts volume" -else - echo " rhai-scripts volume already exists" -fi - -# Check if rhai-scripts volumeMount exists -if ! kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[*].name}' | grep -q "rhai-scripts"; then - echo " Adding rhai-scripts volumeMount..." 
- kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/volumeMounts/-", - "value": { - "name": "rhai-scripts", - "mountPath": "/etc/rhai", - "readOnly": true - } - } - ]' - echo " Added rhai-scripts volumeMount" -else - echo " rhai-scripts volumeMount already exists" -fi - -# Check if --config args exist and replace them if needed -CURRENT_ARGS=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args[*]}' || echo "") -if [[ "$CURRENT_ARGS" =~ "--config" ]]; then - # The operator already set --config, we need to replace it - # Get the full args array as JSON - ARGS_JSON=$(kubectl get deployment ${DEPLOYMENT_NAME} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args}' || echo "[]") - - # Find the index of --config using a simple approach - # Convert JSON array to space-separated and find index - ARGS_LIST=$(echo "$ARGS_JSON" | grep -o '"[^"]*"' | tr -d '"' | tr '\n' ' ') - CONFIG_INDEX=-1 - INDEX=0 - for arg in $ARGS_LIST; do - if [[ "$arg" == "--config" ]]; then - CONFIG_INDEX=$INDEX - break - fi - INDEX=$((INDEX + 1)) - done - - if [[ $CONFIG_INDEX -ge 0 ]]; then - # Replace the --config argument and the following path argument - NEXT_INDEX=$((CONFIG_INDEX + 1)) - echo " Replacing existing --config argument at index $CONFIG_INDEX..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ - { - \"op\": \"replace\", - \"path\": \"/spec/template/spec/containers/0/args/$CONFIG_INDEX\", - \"value\": \"--config\" - }, - { - \"op\": \"replace\", - \"path\": \"/spec/template/spec/containers/0/args/$NEXT_INDEX\", - \"value\": \"/etc/router/router.yaml\" - } - ]" && echo " Successfully replaced --config arguments" || { - echo " Warning: Replace failed, trying remove-then-add approach..." - # Fallback: remove old args, then add new ones - # Remove in reverse order to avoid index shifting - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p="[ - { - \"op\": \"remove\", - \"path\": \"/spec/template/spec/containers/0/args/$NEXT_INDEX\" - }, - { - \"op\": \"remove\", - \"path\": \"/spec/template/spec/containers/0/args/$CONFIG_INDEX\" - } - ]" 2>/dev/null || true - # Add new --config args - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "--config" - }, - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "/etc/router/router.yaml" - } - ]' - echo " Added new --config arguments" - } - else - echo " Warning: Could not find --config index, adding new --config arguments..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "--config" - }, - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "/etc/router/router.yaml" - } - ]' - fi -else - # No --config exists, add it - echo " Adding --config arguments..." - kubectl patch deployment ${DEPLOYMENT_NAME} -n apollo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "--config" - }, - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "/etc/router/router.yaml" - } - ]' - echo " Added --config arguments" -fi - -echo "Router deployment patched" - -# Wait for rollout to complete -echo "Waiting for router rollout to complete..." 
-kubectl rollout status deployment/${DEPLOYMENT_NAME} -n apollo --timeout=300s || true - -# Wait for router to be ready -echo "Waiting for router to be ready..." -kubectl wait --for=condition=ready --timeout=300s supergraph/${RESOURCE_NAME} -n apollo || true - -echo "" -echo "✓ Router configuration applied!" -echo "" -echo "Router configuration has been applied via ConfigMap:" -echo " ConfigMap: router-config (contains router.yaml)" -echo " Mounted at: /etc/router/router.yaml" -echo " Router args: --config /etc/router/router.yaml" -echo "" -echo "Note: The operator's ConfigMap (containing the supergraph schema) is kept intact." -echo " Both ConfigMaps are mounted separately - operator's for schema, ours for config." -echo "" -echo "Rhai scripts have been mounted:" -echo " ConfigMap: rhai-scripts (contains main.rhai)" -echo " Mounted at: /etc/rhai" -echo " Scripts will log at all router lifecycle stages" -echo "" -echo "Monitor router status with:" -echo " kubectl get supergraphs -n apollo" -echo " kubectl get pods -n apollo" -echo " kubectl logs -n apollo deployment/${DEPLOYMENT_NAME}" -echo "" -echo "Next step: Run 09-deploy-ingress.sh to setup external access" - diff --git a/scripts/minikube/09-deploy-ingress.sh b/scripts/minikube/08-deploy-ingress.sh similarity index 97% rename from scripts/minikube/09-deploy-ingress.sh rename to scripts/minikube/08-deploy-ingress.sh index 0be2333..04d71d8 100755 --- a/scripts/minikube/09-deploy-ingress.sh +++ b/scripts/minikube/08-deploy-ingress.sh @@ -1,10 +1,10 @@ #!/bin/bash set -euo pipefail -# Script 09: Deploy Ingress +# Script 08: Deploy Ingress # This script sets up ingress for external access to the router -echo "=== Step 09: Deploying Ingress ===" +echo "=== Step 08: Deploying Ingress ===" # Load environment variables from .env if it exists if [ -f .env ]; then @@ -100,7 +100,7 @@ if grep -q "^export ROUTER_URL=" "$ENV_FILE"; then fi else echo "" >> "$ENV_FILE" - echo "# Router URL (generated by 09-deploy-ingress.sh)" >> "$ENV_FILE" + echo "# Router URL (generated by 08-deploy-ingress.sh)" >> "$ENV_FILE" echo "export ROUTER_URL=\"$ROUTER_URL\"" >> "$ENV_FILE" fi @@ -156,6 +156,6 @@ echo "to support minikube tunnel. The router is accessed via the client's nginx echo "" echo "The router URL has been saved to .env and will be used by the client deployment." 
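# (Editorial sketch, not part of this diff: a quick way to confirm what was persisted
# and that the saved URL answers at all once tunnel, NodePort, or port-forward is up.
# Any HTTP status, even 400/405 for a bare GET, shows the path is reachable.)
grep '^export ROUTER_URL=' .env
curl -sS -o /dev/null -w '%{http_code}\n' "${ROUTER_URL}" || true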
echo "" -echo "Next step: Run 10-deploy-client.sh to deploy the client application (optional)" +echo "Next step: Run 09-deploy-client.sh to deploy the client application (optional)" diff --git a/scripts/minikube/10-deploy-client.sh b/scripts/minikube/09-deploy-client.sh similarity index 96% rename from scripts/minikube/10-deploy-client.sh rename to scripts/minikube/09-deploy-client.sh index e7025f3..77e09f1 100755 --- a/scripts/minikube/10-deploy-client.sh +++ b/scripts/minikube/09-deploy-client.sh @@ -1,10 +1,10 @@ #!/bin/bash set -euo pipefail -# Script 10: Deploy Client +# Script 09: Deploy Client # This script deploys the client application (optional) -echo "=== Step 10: Deploying Client Application ===" +echo "=== Step 09: Deploying Client Application ===" # Load environment variables from .env if it exists if [ -f .env ]; then @@ -41,7 +41,7 @@ fi # Get router URL from .env file if [[ -z "${ROUTER_URL:-}" ]]; then echo "Error: ROUTER_URL is not set" - echo "Please run 09-deploy-ingress.sh first to set up the router URL" + echo "Please run 08-deploy-ingress.sh first to set up the router URL" exit 1 fi From 310147b2f037e0375d022c91e4867407fa2ed426 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Tue, 11 Nov 2025 13:55:18 -0800 Subject: [PATCH 26/31] Rename and refactor ingress deployment script: Change script name from `08-deploy-ingress.sh` to `08-setup-router-access.sh` to better reflect its purpose. Update documentation and other scripts to reference the new script name, clarifying the setup process for router access without using an Ingress resource. Remove the deprecated ingress deployment script. --- docs/setup.md | 11 ++++++----- .../minikube/07-deploy-operator-resources.sh | 2 +- ...oy-ingress.sh => 08-setup-router-access.sh} | 18 +++++++++++------- scripts/minikube/09-deploy-client.sh | 2 +- 4 files changed, 19 insertions(+), 14 deletions(-) rename scripts/minikube/{08-deploy-ingress.sh => 08-setup-router-access.sh} (88%) diff --git a/docs/setup.md b/docs/setup.md index ca6afa7..b810e28 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -172,16 +172,17 @@ kubectl get pods -n apollo kubectl describe supergraph reference-architecture-${ENVIRONMENT} -n apollo ``` -### Script 08: Deploy Ingress +### Script 08: Setup Router Access ```bash -./scripts/minikube/08-deploy-ingress.sh +./scripts/minikube/08-setup-router-access.sh ``` This script: -- Deploys an Ingress resource for external access +- Enables and configures the ingress controller addon (required for the client application's Ingress resource) - Configures the ingress controller as LoadBalancer for `minikube tunnel` support -- Provides access URLs for the router +- Determines and saves the router URL to `.env` file +- **Note:** The router does not use an Ingress resource - the client's nginx proxies to it internally. The ingress controller is needed for the client's Ingress. 
### Script 09: Deploy Client (Optional) @@ -350,7 +351,7 @@ source .env ./scripts/minikube/05-deploy-subgraphs.sh ./scripts/minikube/06-deploy-coprocessor.sh ./scripts/minikube/07-deploy-operator-resources.sh -./scripts/minikube/08-deploy-ingress.sh +./scripts/minikube/08-setup-router-access.sh ./scripts/minikube/09-deploy-client.sh ``` diff --git a/scripts/minikube/07-deploy-operator-resources.sh b/scripts/minikube/07-deploy-operator-resources.sh index 912bdd2..e7b5e12 100755 --- a/scripts/minikube/07-deploy-operator-resources.sh +++ b/scripts/minikube/07-deploy-operator-resources.sh @@ -110,5 +110,5 @@ echo "Monitor router status with:" echo " kubectl get supergraphs -n apollo" echo " kubectl get pods -n apollo" echo "" -echo "Next step: Run 08-deploy-ingress.sh to setup external access" +echo "Next step: Run 08-setup-router-access.sh to configure external access" diff --git a/scripts/minikube/08-deploy-ingress.sh b/scripts/minikube/08-setup-router-access.sh similarity index 88% rename from scripts/minikube/08-deploy-ingress.sh rename to scripts/minikube/08-setup-router-access.sh index 04d71d8..3a51f61 100755 --- a/scripts/minikube/08-deploy-ingress.sh +++ b/scripts/minikube/08-setup-router-access.sh @@ -1,10 +1,12 @@ #!/bin/bash set -euo pipefail -# Script 08: Deploy Ingress -# This script sets up ingress for external access to the router +# Script 08: Setup Router Access +# This script configures the ingress controller for the client application +# The ingress controller is needed for the client's Ingress resource (not the router) +# Note: The router does not use an Ingress resource - the client's nginx proxies to it internally -echo "=== Step 08: Deploying Ingress ===" +echo "=== Step 08: Setting Up Router Access ===" # Load environment variables from .env if it exists if [ -f .env ]; then @@ -32,9 +34,9 @@ if ! kubectl cluster-info &> /dev/null; then exit 1 fi -# Check if ingress addon is enabled +# Check if ingress addon is enabled (required for client's Ingress resource) if ! minikube addons list | grep -q "ingress.*enabled"; then - echo "Enabling ingress addon..." + echo "Enabling ingress addon (required for client application)..." minikube addons enable ingress echo "Waiting for ingress controller to be ready..." sleep 15 @@ -57,8 +59,10 @@ kubectl patch svc ingress-nginx-controller -n ingress-nginx -p '{"spec":{"type": # 1. The client's nginx will proxy /graphql requests to the router service internally # 2. This avoids ingress conflicts (both router and client can't use path /) # 3. The router is accessed via Kubernetes service DNS from within the cluster +# The ingress controller is needed for the CLIENT's Ingress resource, not the router echo "Note: Router ingress is not needed - the client's nginx will proxy /graphql requests to the router service" +echo " The ingress controller is required for the client application's Ingress resource" # Get router URL - use localhost for minikube tunnel (LoadBalancer) or NodePort fallback echo "Getting router URL..." 
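# (Editorial aside, not part of this diff: the in-cluster path the client's nginx
# proxies to can be probed directly. The service name and port match the port-forward
# command in docs/setup.md; the /graphql path follows the ROUTER_URL convention used
# elsewhere in these scripts and may need to be dropped if the router serves GraphQL at /.)
kubectl -n apollo run router-probe --rm -i --restart=Never --image=curlimages/curl -- \
  curl -sS "http://reference-architecture-${ENVIRONMENT}.apollo.svc.cluster.local/graphql" \
  -H 'content-type: application/json' -d '{"query":"{ __typename }"}'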
@@ -100,7 +104,7 @@ if grep -q "^export ROUTER_URL=" "$ENV_FILE"; then fi else echo "" >> "$ENV_FILE" - echo "# Router URL (generated by 08-deploy-ingress.sh)" >> "$ENV_FILE" + echo "# Router URL (generated by 08-setup-router-access.sh)" >> "$ENV_FILE" echo "export ROUTER_URL=\"$ROUTER_URL\"" >> "$ENV_FILE" fi @@ -110,7 +114,7 @@ echo "Router URL saved to .env file: $ROUTER_URL" NODEPORT=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}') echo "" -echo "✓ Ingress deployed successfully!" +echo "✓ Router access configured successfully!" echo "" echo "Router URL saved to .env file: $ROUTER_URL" echo "" diff --git a/scripts/minikube/09-deploy-client.sh b/scripts/minikube/09-deploy-client.sh index 77e09f1..09caf61 100755 --- a/scripts/minikube/09-deploy-client.sh +++ b/scripts/minikube/09-deploy-client.sh @@ -41,7 +41,7 @@ fi # Get router URL from .env file if [[ -z "${ROUTER_URL:-}" ]]; then echo "Error: ROUTER_URL is not set" - echo "Please run 08-deploy-ingress.sh first to set up the router URL" + echo "Please run 08-setup-router-access.sh first to set up the router URL" exit 1 fi From 07ddf643e33193548ee610c12695f6909b2139be Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Tue, 11 Nov 2025 14:30:24 -0800 Subject: [PATCH 27/31] Enhance cleanup and setup documentation: Clarify steps for deleting operator-managed CRDs and uninstalling Helm releases in the cleanup guide. Update setup documentation to specify environment variants in SupergraphSchema CRD and improve instructions for accessing the router. Refactor scripts to streamline router access configuration and provide clearer guidance on using minikube tunnel, NodePort, and port-forward methods. --- docs/cleanup.md | 12 +- docs/operator-guide.md | 21 ++-- docs/setup.md | 128 ++++++++++----------- scripts/minikube/02-setup-apollo-graph.sh | 2 +- scripts/minikube/08-setup-router-access.sh | 91 +++++---------- 5 files changed, 104 insertions(+), 150 deletions(-) diff --git a/docs/cleanup.md b/docs/cleanup.md index 896f9c8..c7d6412 100644 --- a/docs/cleanup.md +++ b/docs/cleanup.md @@ -6,7 +6,7 @@ This guide covers cleaning up all resources deployed to your local Minikube clus ## Delete Operator-Managed Resources -Before deleting Kubernetes resources, first remove the operator-managed CRDs. Make sure you have your `ENVIRONMENT` variable set (or load it from `.env`): +Before deleting Kubernetes resources, first remove the operator-managed CRDs. The operator creates and manages these resources, so they should be deleted before Helm releases. Make sure you have your `ENVIRONMENT` variable set (or load it from `.env`): ```bash if [ -f .env ]; then @@ -17,19 +17,19 @@ ENVIRONMENT=${ENVIRONMENT:-dev} RESOURCE_NAME="reference-architecture-${ENVIRONMENT}" ``` -Delete operator-managed resources: +Delete operator-managed CRDs: ```bash kubectl delete supergraphs ${RESOURCE_NAME} -n apollo || true kubectl delete supergraphschemas ${RESOURCE_NAME} -n apollo || true -kubectl delete ingress router -n apollo || true -kubectl delete ingress client -n client || true kubectl delete subgraph --all --all-namespaces || true ``` +**Note:** The client's Ingress resource is managed by Helm and will be automatically deleted when you uninstall the Helm release in the next step. + ## Uninstall Helm Releases -Uninstall all Helm releases: +Uninstall all Helm releases. 
This will automatically delete all resources created by Helm, including deployments, services, ConfigMaps, and Ingress resources: ```bash helm uninstall client -n client || true @@ -41,7 +41,7 @@ done ## Delete Namespaces -Delete all application namespaces: +After deleting CRDs and uninstalling Helm releases, delete all application namespaces. This will remove any remaining resources: ```bash kubectl delete namespace checkout discovery inventory orders products reviews shipping users || true diff --git a/docs/operator-guide.md b/docs/operator-guide.md index 6da1553..4a4f16b 100644 --- a/docs/operator-guide.md +++ b/docs/operator-guide.md @@ -234,23 +234,24 @@ kubectl logs -n apollo deployment/reference-architecture-{dev|prod} ## Updating Router Configuration -Router configuration is now handled directly via `spec.routerConfig` in the Supergraph CRD. See [Updating Router Configuration](../docs/setup.md#step-5-updating-router-configuration) in the setup guide for details. - -The Apollo GraphOS Operator's `Supergraph` CRD supports router configuration natively via `spec.routerConfig`: +Router configuration is handled directly via `spec.routerConfig` in the Supergraph CRD: 1. Edit `deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml` 2. Update the `spec.routerConfig` section 3. Apply: `kubectl apply -f deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml` 4. The operator automatically updates the router deployment and rolls out changes -To update router configuration without changing subgraphs: - -1. Edit `deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml` -2. Update the `spec.routerConfig` section with your desired router settings -3. Apply: `kubectl apply -f deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml` -4. The operator handles the rollout automatically - no manual patching or restarts needed! +**Example:** +```yaml +spec: + routerConfig: + supergraph: + listen: 0.0.0.0:4000 + introspection: true + # ... other router settings +``` -For more details, see the [Router Configuration Migration Guide](../deploy/operator-resources/ROUTER_CONFIG_MIGRATION.md). +The operator handles the rollout automatically - no manual patching or restarts needed! ## Best Practices diff --git a/docs/setup.md b/docs/setup.md index b810e28..affcaee 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -61,7 +61,7 @@ export APOLLO_KEY="your-apollo-personal-api-key" export ENVIRONMENT="dev" ``` -The `ENVIRONMENT` variable is required and allows you to create multiple environments. Each environment will have its own Apollo GraphOS variant. +The `ENVIRONMENT` variable is required and allows you to create multiple environments. Each environment will reference its own Apollo GraphOS variant (e.g., `@dev`, `@prod`) in the SupergraphSchema CRD. Variants are created automatically when schemas are first published to them. **Note:** When deploying subgraphs, the scripts will look for environment-specific values files at `subgraphs/{subgraph}/deploy/environments/${ENVIRONMENT}.yaml`. If this file exists, it will be used to override the default `values.yaml`. If it doesn't exist, the default `values.yaml` will be used. The repository includes `dev.yaml` and `prod.yaml` files for all subgraphs. If you create a custom environment name, you can optionally create matching values files for environment-specific configurations. 
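To make the override pattern in that note concrete, here is a sketch of how a per-environment values file layers on top of the chart defaults. The release name and chart path are assumptions based on the repo layout described above; `05-deploy-subgraphs.sh` remains the authoritative implementation:

```bash
SUBGRAPH=users   # any subgraph directory under subgraphs/
helm upgrade --install "${SUBGRAPH}" "subgraphs/${SUBGRAPH}/deploy" \
  --namespace "${SUBGRAPH}" --create-namespace \
  -f "subgraphs/${SUBGRAPH}/deploy/values.yaml" \
  -f "subgraphs/${SUBGRAPH}/deploy/environments/${ENVIRONMENT}.yaml"
```

Helm applies later `-f` files on top of earlier ones, so the environment file only needs the keys that differ from `values.yaml`.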
@@ -89,11 +89,12 @@ This script: This script: - Creates an Apollo GraphOS graph - Creates an Operator API key -- Creates a variant for your environment - Saves configuration to `.env` **Note:** Make sure your `.env` file has `APOLLO_KEY` set before running this script. +**Note:** Variants (e.g., `@dev`, `@prod`) are referenced in the SupergraphSchema CRD and will be created automatically when schemas are first published to those variants. + ### Script 03: Setup Kubernetes Cluster ```bash @@ -184,7 +185,7 @@ This script: - Determines and saves the router URL to `.env` file - **Note:** The router does not use an Ingress resource - the client's nginx proxies to it internally. The ingress controller is needed for the client's Ingress. -### Script 09: Deploy Client (Optional) +### Script 09: Deploy Client ```bash ./scripts/minikube/09-deploy-client.sh @@ -194,11 +195,15 @@ This script: - Builds and deploys the client application - Sets up ingress for client access +**Note:** The client is required if you want to access the router via the ingress controller (minikube tunnel or NodePort). If you only need direct router access, you can use port-forward (Option 2 in Step 4) and skip this script. + ## Step 4: Access Your Supergraph After running all scripts, you can access your supergraph in several ways: -### Option 1: Using Minikube Tunnel (recommended for LoadBalancer access) +### Option 1: Using Minikube Tunnel (requires Script 09) + +**Note:** This option requires the client application to be deployed (Script 09) because it uses the client's Ingress resource. The ingress controller has been configured as a LoadBalancer service. To access it via `minikube tunnel`: @@ -206,20 +211,25 @@ The ingress controller has been configured as a LoadBalancer service. To access minikube tunnel ``` +**Troubleshooting if tunnel hangs:** +- Check if tunnel is already running: `ps aux | grep 'minikube tunnel'` +- Stop existing tunnel: `pkill -f 'minikube tunnel'` +- Try running with explicit cleanup: `minikube tunnel --cleanup` +- On macOS, if sudo password isn't prompted, try: `sudo minikube tunnel` + **Important notes:** -- Enter your sudo password when prompted - You may see a message "Starting tunnel for service router" - **this can be safely ignored** - The "router" is an Ingress resource (not a service), so it doesn't need tunneling - Only the `ingress-nginx-controller` LoadBalancer service needs tunneling - Wait for the "Status: running" message -- Access the router at: `http://127.0.0.1/` +- Access the client UI at: `http://127.0.0.1/` **Why you see "router" in the tunnel output:** The ingress controller automatically sets a LoadBalancer status on Ingress resources, which makes `minikube tunnel` think it needs to tunnel them. However, since the ingress controller is already being tunneled, the router is accessible through it. You can safely ignore this message. -### Option 2: Using Port Forwarding +### Option 2: Using Port Forwarding (no client required) -Port forward directly to the router service: +Port forward directly to the router service. This method does not require the client application: ```bash kubectl port-forward service/reference-architecture-${ENVIRONMENT} -n apollo 4000:80 @@ -229,7 +239,9 @@ Then access at `http://localhost:4000` in your browser. **Note:** Keep the port-forward command running in a terminal while you access the router. 
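With the port-forward running, a minimal GraphQL request is a quick way to confirm the router is answering (a sketch; the `/graphql` path follows the ROUTER_URL convention used by the scripts, and depending on the `authentication`/`authorization` settings in `spec.routerConfig` some operations may also need an `Authorization` header):

```bash
curl -sS http://localhost:4000/graphql \
  -H 'content-type: application/json' \
  -d '{"query":"{ __typename }"}'
```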
-### Option 3: Using Ingress via NodePort +### Option 3: Using Ingress via NodePort (requires Script 09) + +**Note:** This option requires the client application to be deployed (Script 09) because it uses the client's Ingress resource. Get the Minikube IP and ingress controller NodePort: @@ -257,51 +269,7 @@ Or test the health endpoint (if accessible on the main port): curl http://localhost:4000/health ``` -## Step 5: Updating Router Configuration - -Router configuration is now handled directly in the Supergraph CRD via `spec.routerConfig`. To update the router configuration: - -### Router Configuration via Supergraph CRD - -The Apollo GraphOS Operator supports router configuration natively via the `spec.routerConfig` property in the Supergraph CRD. This means: - -1. **No manual patching required** - The operator handles everything automatically -2. **Declarative configuration** - All router settings are in the Supergraph YAML -3. **Automatic rollout** - Changes are applied automatically when you update the CRD - -### Updating Router Configuration - -To update the router configuration: - -1. **Edit the Supergraph resource:** - ```bash - # Edit the router configuration - vim deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml - ``` - -2. **Update the `spec.routerConfig` section** with your desired settings: - ```yaml - spec: - routerConfig: - supergraph: - listen: 0.0.0.0:4000 - introspection: true - # ... other router settings - ``` - -3. **Apply the changes:** - ```bash - kubectl apply -f deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml - ``` - -4. **The operator automatically:** - - Updates the router deployment with the new configuration - - Rolls out the changes to all router pods - - No manual patching or restarts needed! - -**Note:** Router configuration is now managed entirely through the Supergraph CRD. No additional scripts are needed. - -## Step 6: Logging Into the Client Application +## Step 5: Logging Into the Client Application If you deployed the client application (script 09), you can log in using the following test credentials: @@ -356,7 +324,7 @@ source .env ``` Each environment will have: -- Its own Apollo GraphOS variant +- Its own Apollo GraphOS variant (created automatically when schemas are published) - Separate Kubernetes resources (namespaces, services, etc.) - Its own router instance @@ -401,35 +369,29 @@ kubectl logs -n apollo deployment/reference-architecture-${ENVIRONMENT} If the router is not picking up configuration changes: -1. **Verify ConfigMaps exist:** +1. **Verify routerConfig in Supergraph CRD:** ```bash - kubectl get configmap router-config -n apollo kubectl get supergraph reference-architecture-${ENVIRONMENT} -n apollo -o yaml | grep -A 50 routerConfig ``` -2. **Check volume mounts:** - ```bash - kubectl describe deployment reference-architecture-${ENVIRONMENT} -n apollo | grep -A 10 "Volumes:" - kubectl describe pod -n apollo | grep -A 10 "Mounts:" - ``` - -3. **Verify container arguments:** +2. **Check router deployment status:** ```bash - kubectl get deployment reference-architecture-${ENVIRONMENT} -n apollo -o jsonpath='{.spec.template.spec.containers[0].args}' + kubectl describe deployment reference-architecture-${ENVIRONMENT} -n apollo + kubectl get pods -n apollo ``` - - Should include `--config /etc/router/router.yaml` -4. **Check router logs for configuration errors:** +3. 
**Check router logs for configuration errors:** ```bash kubectl logs -n apollo deployment/reference-architecture-${ENVIRONMENT} | grep -i "config\|error" ``` -5. **Re-apply router configuration:** +4. **Re-apply router configuration:** ```bash kubectl apply -f deploy/operator-resources/supergraph-${ENVIRONMENT}.yaml ``` +For more details on updating router configuration, see the [Operator Guide](./operator-guide.md#updating-router-configuration). + ### Router pods in CrashLoopBackOff If router pods are crashing: @@ -459,6 +421,34 @@ minikube addons enable ingress kubectl get pods -n ingress-nginx ``` +### Minikube tunnel hangs or doesn't prompt for password + +If `minikube tunnel` hangs without prompting for your sudo password: + +1. **Check if tunnel is already running:** + ```bash + ps aux | grep 'minikube tunnel' + ``` + +2. **Stop any existing tunnel processes:** + ```bash + pkill -f 'minikube tunnel' + ``` + +3. **Try running with cleanup flag:** + ```bash + minikube tunnel --cleanup + ``` + +4. **On macOS, try running with sudo:** + ```bash + sudo minikube tunnel + ``` + +5. **Alternative: Use NodePort or port-forward instead:** + - NodePort: Access via `http://$(minikube ip):$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')` + - Port-forward: `kubectl port-forward service/reference-architecture-${ENVIRONMENT} -n apollo 4000:80` + ## Next Steps - Read the [Operator Guide](./operator-guide.md) to understand how the Apollo GraphOS Operator works diff --git a/scripts/minikube/02-setup-apollo-graph.sh b/scripts/minikube/02-setup-apollo-graph.sh index f4a55d9..cff0469 100755 --- a/scripts/minikube/02-setup-apollo-graph.sh +++ b/scripts/minikube/02-setup-apollo-graph.sh @@ -2,7 +2,7 @@ set -euo pipefail # Script 02: Setup Apollo GraphOS Graph -# This script creates an Apollo GraphOS graph and variants, and generates API keys +# This script creates an Apollo GraphOS graph and generates API keys echo "=== Step 02: Setting up Apollo GraphOS Graph ===" diff --git a/scripts/minikube/08-setup-router-access.sh b/scripts/minikube/08-setup-router-access.sh index 3a51f61..feb0cc2 100755 --- a/scripts/minikube/08-setup-router-access.sh +++ b/scripts/minikube/08-setup-router-access.sh @@ -55,37 +55,18 @@ kubectl wait --namespace ingress-nginx \ echo "Configuring ingress controller for minikube tunnel..." kubectl patch svc ingress-nginx-controller -n ingress-nginx -p '{"spec":{"type":"LoadBalancer"}}' 2>/dev/null || true -# Note: We don't create a router ingress here because: -# 1. The client's nginx will proxy /graphql requests to the router service internally -# 2. This avoids ingress conflicts (both router and client can't use path /) -# 3. The router is accessed via Kubernetes service DNS from within the cluster -# The ingress controller is needed for the CLIENT's Ingress resource, not the router - -echo "Note: Router ingress is not needed - the client's nginx will proxy /graphql requests to the router service" -echo " The ingress controller is required for the client application's Ingress resource" - # Get router URL - use localhost for minikube tunnel (LoadBalancer) or NodePort fallback -echo "Getting router URL..." 
MINIKUBE_IP=$(minikube ip) INGRESS_NODEPORT=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}' 2>/dev/null || echo "") - -# Check if ingress controller is LoadBalancer (for minikube tunnel) INGRESS_TYPE=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.spec.type}' 2>/dev/null || echo "") -# Use localhost for LoadBalancer (minikube tunnel), or NodePort, or default to localhost:4000 for port-forward -# Note: The client's nginx will proxy /graphql requests to the router +# Determine router URL based on ingress type if [ "$INGRESS_TYPE" == "LoadBalancer" ]; then ROUTER_URL="http://127.0.0.1/graphql" - echo "Using localhost URL for minikube tunnel: $ROUTER_URL" - echo "Note: Run 'minikube tunnel' in a separate terminal to access the router" - echo "Note: The client's nginx will proxy /graphql requests to the router service" elif [ -n "$INGRESS_NODEPORT" ]; then ROUTER_URL="http://${MINIKUBE_IP}:${INGRESS_NODEPORT}/graphql" - echo "Using ingress NodePort URL: $ROUTER_URL" else - # Default to localhost for port-forward (user will need to run port-forward separately) ROUTER_URL="http://localhost:4000/graphql" - echo "Using default localhost URL (use 'kubectl port-forward' to access): $ROUTER_URL" fi # Save to .env file @@ -96,7 +77,6 @@ fi # Remove old ROUTER_URL if it exists and add new one if grep -q "^export ROUTER_URL=" "$ENV_FILE"; then - # Use a temp file for sed compatibility across platforms if [[ "$OSTYPE" == "darwin"* ]]; then sed -i '' "s|^export ROUTER_URL=.*|export ROUTER_URL=\"$ROUTER_URL\"|" "$ENV_FILE" else @@ -108,58 +88,41 @@ else echo "export ROUTER_URL=\"$ROUTER_URL\"" >> "$ENV_FILE" fi -echo "Router URL saved to .env file: $ROUTER_URL" - -# Get the NodePort for the ingress controller (for reference) -NODEPORT=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}') - +# Output summary echo "" echo "✓ Router access configured successfully!" echo "" -echo "Router URL saved to .env file: $ROUTER_URL" -echo "" -if [ "$INGRESS_TYPE" == "LoadBalancer" ]; then - echo "⚠️ IMPORTANT: The router URL has been set to $ROUTER_URL for minikube tunnel." - echo " You MUST run 'minikube tunnel' for the router to be accessible." - echo " If you prefer a different access method, update ROUTER_URL in .env file." - echo "" -fi -echo "To access the router, use one of these methods:" +echo "Router URL: $ROUTER_URL (saved to .env)" echo "" + if [ "$INGRESS_TYPE" == "LoadBalancer" ]; then -echo "Option 1: Use minikube tunnel (REQUIRED - URL is set in .env for this method):" -echo " 1. In a separate terminal, run: minikube tunnel" -echo " 2. Enter your sudo password when prompted" -echo " 3. You may see 'Starting tunnel for service router' - this can be ignored" -echo " 4. Wait for 'Status: running' message" -echo " 5. Access the client UI at: http://127.0.0.1/" -echo " 6. 
GraphQL requests will be proxied to the router via /graphql" -echo "" -if [ -n "$NODEPORT" ]; then - echo "Option 2: Access via NodePort (requires updating ROUTER_URL in .env):" - echo " Client UI: http://${MINIKUBE_IP}:${NODEPORT}/" - echo " GraphQL: http://${MINIKUBE_IP}:${NODEPORT}/graphql" - echo " Then update .env: export ROUTER_URL=\"http://${MINIKUBE_IP}:${NODEPORT}/graphql\"" + echo "⚠️ Next step: Run 'minikube tunnel' in a separate terminal to access the router" echo "" -fi -echo "Option 3: Port forward (requires updating ROUTER_URL in .env):" -echo " kubectl port-forward service/reference-architecture-${ENVIRONMENT} -n apollo 4000:80" -echo " Then update .env: export ROUTER_URL=\"http://localhost:4000/graphql\"" + echo "Access methods:" + echo " 1. minikube tunnel (recommended):" + echo " • Run: minikube tunnel" + echo " • If it hangs, check if tunnel is already running: ps aux | grep 'minikube tunnel'" + echo " • Stop existing tunnel: pkill -f 'minikube tunnel'" + echo " • Access client UI at: http://127.0.0.1/" + echo " • GraphQL requests proxied via /graphql" + if [ -n "$INGRESS_NODEPORT" ]; then + echo "" + echo " 2. NodePort (no tunnel needed):" + echo " • Client UI: http://${MINIKUBE_IP}:${INGRESS_NODEPORT}/" + echo " • Update .env: export ROUTER_URL=\"http://${MINIKUBE_IP}:${INGRESS_NODEPORT}/graphql\"" + fi echo "" + echo " 3. Port forward (no tunnel needed):" + echo " • Run: kubectl port-forward service/reference-architecture-${ENVIRONMENT} -n apollo 4000:80" + echo " • Update .env: export ROUTER_URL=\"http://localhost:4000/graphql\"" else - echo "Option 1: Access via NodePort:" - echo " http://${MINIKUBE_IP}:${NODEPORT}" - echo "" - echo "Option 2: Port forward:" - echo " kubectl port-forward service/reference-architecture-${ENVIRONMENT} -n apollo 4000:80" - echo " Then access at: http://localhost:4000" - echo "" + echo "Access methods:" + echo " 1. NodePort: http://${MINIKUBE_IP}:${INGRESS_NODEPORT}" + echo " 2. Port forward: kubectl port-forward service/reference-architecture-${ENVIRONMENT} -n apollo 4000:80" fi -echo "Note: The ingress controller service has been configured as LoadBalancer" -echo "to support minikube tunnel. The router is accessed via the client's nginx proxy." -echo "" -echo "The router URL has been saved to .env and will be used by the client deployment." + echo "" -echo "Next step: Run 09-deploy-client.sh to deploy the client application (optional)" +echo "Next: Run 09-deploy-client.sh to deploy the client application" +echo " (Required for ingress access; optional if using port-forward)" From 5ce1ea3cd44b62d6fd63124cb0be5d115561ebfb Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Tue, 11 Nov 2025 14:39:11 -0800 Subject: [PATCH 28/31] Refactor LoginForm component: Wrap form elements in a
tag and set the button type to "submit" for improved form handling. This change enhances the user experience by ensuring proper form submission behavior. --- client/src/components/LoginForm/LoginForm.tsx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/client/src/components/LoginForm/LoginForm.tsx b/client/src/components/LoginForm/LoginForm.tsx index 220c95b..a75174d 100644 --- a/client/src/components/LoginForm/LoginForm.tsx +++ b/client/src/components/LoginForm/LoginForm.tsx @@ -87,7 +87,7 @@ const LoginForm = () => { return ( <> {!data && ( - <> + Username { Forgot password? - + )} {!loading && ( From 2c8316d578ae5979f168724c04d33dd7dc9c762b Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Tue, 11 Nov 2025 15:21:36 -0800 Subject: [PATCH 29/31] Remove deprecated subgraph configuration files for checkout, discovery, inventory, orders, products, reviews, shipping, and users in both development and production environments. This cleanup enhances the clarity and maintainability of the project by eliminating unused resources. --- subgraphs/checkout/k8s/subgraph-dev.yaml | 15 --------------- subgraphs/checkout/k8s/subgraph-prod.yaml | 15 --------------- subgraphs/discovery/k8s/subgraph-dev.yaml | 14 -------------- subgraphs/discovery/k8s/subgraph-prod.yaml | 14 -------------- subgraphs/inventory/k8s/subgraph-dev.yaml | 14 -------------- subgraphs/inventory/k8s/subgraph-prod.yaml | 14 -------------- subgraphs/orders/k8s/subgraph-dev.yaml | 14 -------------- subgraphs/orders/k8s/subgraph-prod.yaml | 14 -------------- subgraphs/products/k8s/subgraph-dev.yaml | 14 -------------- subgraphs/products/k8s/subgraph-prod.yaml | 14 -------------- subgraphs/reviews/k8s/subgraph-dev.yaml | 14 -------------- subgraphs/reviews/k8s/subgraph-prod.yaml | 14 -------------- subgraphs/shipping/k8s/subgraph-dev.yaml | 14 -------------- subgraphs/shipping/k8s/subgraph-prod.yaml | 14 -------------- subgraphs/users/k8s/subgraph-dev.yaml | 14 -------------- subgraphs/users/k8s/subgraph-prod.yaml | 14 -------------- 16 files changed, 226 deletions(-) delete mode 100644 subgraphs/checkout/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/checkout/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/discovery/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/discovery/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/inventory/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/inventory/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/orders/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/orders/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/products/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/products/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/reviews/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/reviews/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/shipping/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/shipping/k8s/subgraph-prod.yaml delete mode 100644 subgraphs/users/k8s/subgraph-dev.yaml delete mode 100644 subgraphs/users/k8s/subgraph-prod.yaml diff --git a/subgraphs/checkout/k8s/subgraph-dev.yaml b/subgraphs/checkout/k8s/subgraph-dev.yaml deleted file mode 100644 index 70a6fa8..0000000 --- a/subgraphs/checkout/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: checkout - namespace: checkout - labels: - app: checkout - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.checkout.svc.cluster.local:4001 - schema: - ociImage: - reference: 
ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/checkout:main - path: /app/schema.graphql - diff --git a/subgraphs/checkout/k8s/subgraph-prod.yaml b/subgraphs/checkout/k8s/subgraph-prod.yaml deleted file mode 100644 index 70a6fa8..0000000 --- a/subgraphs/checkout/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: checkout - namespace: checkout - labels: - app: checkout - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.checkout.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/checkout:main - path: /app/schema.graphql - diff --git a/subgraphs/discovery/k8s/subgraph-dev.yaml b/subgraphs/discovery/k8s/subgraph-dev.yaml deleted file mode 100644 index f69c8ac..0000000 --- a/subgraphs/discovery/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: discovery - namespace: discovery - labels: - app: discovery - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.discovery.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/discovery:main - path: /app/schema.graphql diff --git a/subgraphs/discovery/k8s/subgraph-prod.yaml b/subgraphs/discovery/k8s/subgraph-prod.yaml deleted file mode 100644 index f69c8ac..0000000 --- a/subgraphs/discovery/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: discovery - namespace: discovery - labels: - app: discovery - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.discovery.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/discovery:main - path: /app/schema.graphql diff --git a/subgraphs/inventory/k8s/subgraph-dev.yaml b/subgraphs/inventory/k8s/subgraph-dev.yaml deleted file mode 100644 index 1dfac56..0000000 --- a/subgraphs/inventory/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: inventory - namespace: inventory - labels: - app: inventory - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.inventory.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/inventory:main - path: /app/schema.graphql diff --git a/subgraphs/inventory/k8s/subgraph-prod.yaml b/subgraphs/inventory/k8s/subgraph-prod.yaml deleted file mode 100644 index 1dfac56..0000000 --- a/subgraphs/inventory/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: inventory - namespace: inventory - labels: - app: inventory - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.inventory.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/inventory:main - path: /app/schema.graphql diff --git a/subgraphs/orders/k8s/subgraph-dev.yaml b/subgraphs/orders/k8s/subgraph-dev.yaml deleted file mode 100644 index 1793d41..0000000 --- a/subgraphs/orders/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: orders - namespace: orders - labels: - app: orders - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.orders.svc.cluster.local:4001 - schema: - ociImage: - reference: 
ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/orders:main - path: /app/schema.graphql diff --git a/subgraphs/orders/k8s/subgraph-prod.yaml b/subgraphs/orders/k8s/subgraph-prod.yaml deleted file mode 100644 index 1793d41..0000000 --- a/subgraphs/orders/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: orders - namespace: orders - labels: - app: orders - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.orders.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/orders:main - path: /app/schema.graphql diff --git a/subgraphs/products/k8s/subgraph-dev.yaml b/subgraphs/products/k8s/subgraph-dev.yaml deleted file mode 100644 index 980c79d..0000000 --- a/subgraphs/products/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: products - namespace: products - labels: - app: products - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.products.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/products:main - path: /app/schema.graphql diff --git a/subgraphs/products/k8s/subgraph-prod.yaml b/subgraphs/products/k8s/subgraph-prod.yaml deleted file mode 100644 index 980c79d..0000000 --- a/subgraphs/products/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: products - namespace: products - labels: - app: products - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.products.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/products:main - path: /app/schema.graphql diff --git a/subgraphs/reviews/k8s/subgraph-dev.yaml b/subgraphs/reviews/k8s/subgraph-dev.yaml deleted file mode 100644 index 5fb5903..0000000 --- a/subgraphs/reviews/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: reviews - namespace: reviews - labels: - app: reviews - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.reviews.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/reviews:main - path: /app/schema.graphql diff --git a/subgraphs/reviews/k8s/subgraph-prod.yaml b/subgraphs/reviews/k8s/subgraph-prod.yaml deleted file mode 100644 index 5fb5903..0000000 --- a/subgraphs/reviews/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: reviews - namespace: reviews - labels: - app: reviews - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.reviews.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/reviews:main - path: /app/schema.graphql diff --git a/subgraphs/shipping/k8s/subgraph-dev.yaml b/subgraphs/shipping/k8s/subgraph-dev.yaml deleted file mode 100644 index 5178e7b..0000000 --- a/subgraphs/shipping/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: shipping - namespace: shipping - labels: - app: shipping - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.shipping.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/shipping:main - 
path: /app/schema.graphql diff --git a/subgraphs/shipping/k8s/subgraph-prod.yaml b/subgraphs/shipping/k8s/subgraph-prod.yaml deleted file mode 100644 index 5178e7b..0000000 --- a/subgraphs/shipping/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: shipping - namespace: shipping - labels: - app: shipping - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.shipping.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/shipping:main - path: /app/schema.graphql diff --git a/subgraphs/users/k8s/subgraph-dev.yaml b/subgraphs/users/k8s/subgraph-dev.yaml deleted file mode 100644 index 15a5296..0000000 --- a/subgraphs/users/k8s/subgraph-dev.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: users - namespace: users - labels: - app: users - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.users.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/users:main - path: /app/schema.graphql diff --git a/subgraphs/users/k8s/subgraph-prod.yaml b/subgraphs/users/k8s/subgraph-prod.yaml deleted file mode 100644 index 15a5296..0000000 --- a/subgraphs/users/k8s/subgraph-prod.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apollographql.com/v1alpha2 -kind: Subgraph -metadata: - name: users - namespace: users - labels: - app: users - apollo.io/subgraph: "true" -spec: - endpoint: http://graphql.users.svc.cluster.local:4001 - schema: - ociImage: - reference: ghcr.io/${GITHUB_ORG}/implemented-reference-architecture/users:main - path: /app/schema.graphql From 35bf152be074201d31a00300483b94746a98c0b4 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Tue, 11 Nov 2025 15:33:10 -0800 Subject: [PATCH 30/31] Deleting unnecessary file --- terraform/minikube/.terraform.lock.hcl | 42 -------------------------- 1 file changed, 42 deletions(-) delete mode 100644 terraform/minikube/.terraform.lock.hcl diff --git a/terraform/minikube/.terraform.lock.hcl b/terraform/minikube/.terraform.lock.hcl deleted file mode 100644 index 9042ce2..0000000 --- a/terraform/minikube/.terraform.lock.hcl +++ /dev/null @@ -1,42 +0,0 @@ -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. 
- -provider "registry.terraform.io/hashicorp/local" { - version = "2.4.1" - hashes = [ - "h1:gpp25uNkYJYzJVnkyRr7RIBVfwLs9GSq2HNnFpTRBg0=", - "zh:244b445bf34ddbd167731cc6c6b95bbed231dc4493f8cc34bd6850cfe1f78528", - "zh:3c330bdb626123228a0d1b1daa6c741b4d5d484ab1c7ae5d2f48d4c9885cc5e9", - "zh:5ff5f9b791ddd7557e815449173f2db38d338e674d2d91800ac6e6d808de1d1d", - "zh:70206147104f4bf26ae67d730c995772f85bf23e28c2c2e7612c74f4dae3c46f", - "zh:75029676993accd6bef933c196b2fad51a9ec8a69a847dbbe96ec8ebf7926cdc", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:7d48d5999fe1fcdae9295a7c3448ac1541f5a24c474bd82df6d4fa3732483f2b", - "zh:b766b38b027f0f84028244d1c2f990431a37d4fc3ac645962924554016507e77", - "zh:bfc7ad301dada204cf51c59d8bd6a9a87de5fddb42190b4d6ba157d6e08a1f10", - "zh:c902b527702a8c5e2c25a6637d07bbb1690cb6c1e63917a5f6dc460efd18d43f", - "zh:d68ae0e1070cf429c46586bc87580c3ed113f76241da2b6e4f1a8348126b3c46", - "zh:f4903fd89f7c92a346ae9e666c2d0b6884c4474ae109e9b4bd15e7efaa4bfc29", - ] -} - -provider "registry.terraform.io/integrations/github" { - version = "5.45.0" - hashes = [ - "h1:sP/Er9osOsz4vhKZAul+GeV0c5XdvMblJBMiP+T5tWc=", - "zh:2afb8ee5b847071e51d5a39bcad5cf466c4d22452450d37c44a5f9d2eb9879e5", - "zh:38d087b88c86ddd63b60d14d613f86a5885d154048098c0484266a9a69018b16", - "zh:3e6a787e3e40f1535d85f8dc5f2e8c90242ab8237feebd027f696fa154261394", - "zh:55dac5a813b3774b48ca45b8a797c32e6d787d4f282b43b622155cad3daac46a", - "zh:563f2782f3c4c584b249c5fa0628951a57b4593f3c5805a4efb6d494f8686716", - "zh:677180ec9376d5f926286592998e2864c85f06d6b416c1d89031d817a285c72e", - "zh:80eec141fa47131e8f60a6478e51b3a5920efe803444e684f9605fca09a24e34", - "zh:8b9f1e1f4b42b51e53767f4f927eabdcefe55fb0369e996ac2a0063148b5e48d", - "zh:95627f75848561830f8c20949f024f902a2100a022c68aa8d84320f43e75cc46", - "zh:95ac41b99dfca3ce556092e036bb04dc03367d0779071112e59d4bf11259a89d", - "zh:9e966482729ba8214b480bdd786aff9a15234e9c093c5406b56ce89ccb07dcab", - "zh:b7a9d563613f1b9a233f8f285848cc9d8c08c556aad7ea57cd63e0abb19b10cf", - "zh:ce56bb7ca876f47f5beee01de3ab84d27964b972c9adceb8e2f7824891e05c27", - "zh:f73e063ad5b84f1943eafb8a52a26dd805d06ac11d6c951175ac76c07187f553", - ] -} From 12d895a1fad355ff3a36a5e685b2240ca8ec7695 Mon Sep 17 00:00:00 2001 From: "andy.garcia" Date: Tue, 11 Nov 2025 15:39:59 -0800 Subject: [PATCH 31/31] Remove deprecated router development scripts and configuration files. Update package.json to eliminate the dev:router script and add a TODO for local router development workflow. This cleanup enhances project maintainability by removing unused resources and outlining future development plans. 
--- TODO.md | 6 + dev/configuration_schema.json | 9133 --------------------------------- dev/dev.sh | 18 - dev/router.yaml | 42 - dev/supergraph.yaml | 35 - package.json | 1 - 6 files changed, 6 insertions(+), 9229 deletions(-) delete mode 100644 dev/configuration_schema.json delete mode 100755 dev/dev.sh delete mode 100644 dev/router.yaml delete mode 100644 dev/supergraph.yaml diff --git a/TODO.md b/TODO.md index 8c8fe71..9a5bd33 100644 --- a/TODO.md +++ b/TODO.md @@ -10,6 +10,12 @@ This file tracks future improvements and features for the reference architecture - Add deployment scripts - Update documentation +- [ ] Add local router development workflow + - Document how to run router locally for faster iteration + - Set up local router configuration that matches Kubernetes setup + - Create scripts/guide for local router development without Kubernetes + - Enable testing router config changes locally before deploying + ## Monitoring & Observability - [ ] Implement Zipkin for distributed tracing diff --git a/dev/configuration_schema.json b/dev/configuration_schema.json deleted file mode 100644 index 6b222f4..0000000 --- a/dev/configuration_schema.json +++ /dev/null @@ -1,9133 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Configuration", - "description": "The configuration for the router.\n\nCan be created through `serde::Deserialize` from various formats, or inline in Rust code with `serde_json::json!` and `serde_json::from_value`.", - "type": "object", - "properties": { - "apq": { - "description": "#/definitions/Apq", - "$ref": "#/definitions/Apq" - }, - "authentication": { - "description": "#/definitions/Conf", - "$ref": "#/definitions/Conf" - }, - "authorization": { - "description": "#/definitions/Conf2", - "$ref": "#/definitions/Conf2" - }, - "batching": { - "description": "#/definitions/Batching", - "$ref": "#/definitions/Batching" - }, - "connectors": { - "description": "#/definitions/ConnectorsConfig", - "$ref": "#/definitions/ConnectorsConfig" - }, - "coprocessor": { - "description": "#/definitions/Conf3", - "$ref": "#/definitions/Conf3" - }, - "cors": { - "description": "#/definitions/Cors", - "$ref": "#/definitions/Cors" - }, - "csrf": { - "description": "#/definitions/CSRFConfig", - "$ref": "#/definitions/CSRFConfig" - }, - "demand_control": { - "description": "#/definitions/DemandControlConfig", - "$ref": "#/definitions/DemandControlConfig" - }, - "enhanced_client_awareness": { - "description": "#/definitions/Config7", - "$ref": "#/definitions/Config7" - }, - "experimental_chaos": { - "description": "#/definitions/Chaos", - "$ref": "#/definitions/Chaos" - }, - "experimental_type_conditioned_fetching": { - "description": "Type conditioned fetching configuration.", - "default": false, - "type": "boolean" - }, - "fleet_detector": { - "description": "#/definitions/Conf4", - "$ref": "#/definitions/Conf4" - }, - "forbid_mutations": { - "description": "#/definitions/ForbidMutationsConfig", - "$ref": "#/definitions/ForbidMutationsConfig" - }, - "headers": { - "description": "#/definitions/Config8", - "$ref": "#/definitions/Config8" - }, - "health_check": { - "description": "#/definitions/Config", - "$ref": "#/definitions/Config" - }, - "homepage": { - "description": "#/definitions/Homepage", - "$ref": "#/definitions/Homepage" - }, - "include_subgraph_errors": { - "description": "#/definitions/Config9", - "$ref": "#/definitions/Config9" - }, - "license_enforcement": { - "description": "#/definitions/LicenseEnforcementConfig", - "$ref": 
"#/definitions/LicenseEnforcementConfig" - }, - "limits": { - "description": "#/definitions/Config2", - "$ref": "#/definitions/Config2" - }, - "override_subgraph_url": { - "description": "#/definitions/Conf5", - "$ref": "#/definitions/Conf5" - }, - "persisted_queries": { - "description": "#/definitions/PersistedQueries", - "$ref": "#/definitions/PersistedQueries" - }, - "plugins": { - "description": "#/definitions/Plugins", - "$ref": "#/definitions/Plugins" - }, - "preview_entity_cache": { - "description": "#/definitions/Config10", - "$ref": "#/definitions/Config10" - }, - "preview_file_uploads": { - "description": "#/definitions/FileUploadsConfig", - "$ref": "#/definitions/FileUploadsConfig" - }, - "progressive_override": { - "description": "#/definitions/Config11", - "$ref": "#/definitions/Config11" - }, - "rhai": { - "description": "#/definitions/Conf6", - "$ref": "#/definitions/Conf6" - }, - "sandbox": { - "description": "#/definitions/Sandbox", - "$ref": "#/definitions/Sandbox" - }, - "server": { - "description": "#/definitions/Server", - "$ref": "#/definitions/Server" - }, - "subscription": { - "description": "#/definitions/SubscriptionConfig", - "$ref": "#/definitions/SubscriptionConfig" - }, - "supergraph": { - "description": "#/definitions/Supergraph", - "$ref": "#/definitions/Supergraph" - }, - "telemetry": { - "description": "#/definitions/Conf7", - "$ref": "#/definitions/Conf7" - }, - "tls": { - "description": "#/definitions/Tls", - "$ref": "#/definitions/Tls" - }, - "traffic_shaping": { - "description": "#/definitions/Config17", - "$ref": "#/definitions/Config17" - } - }, - "additionalProperties": false, - "definitions": { - "AWSSigV4Config": { - "description": "Configure AWS sigv4 auth.", - "oneOf": [ - { - "type": "object", - "required": [ - "hardcoded" - ], - "properties": { - "hardcoded": { - "description": "#/definitions/AWSSigV4HardcodedConfig", - "$ref": "#/definitions/AWSSigV4HardcodedConfig" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "default_chain" - ], - "properties": { - "default_chain": { - "description": "#/definitions/DefaultChainConfig", - "$ref": "#/definitions/DefaultChainConfig" - } - }, - "additionalProperties": false - } - ] - }, - "AWSSigV4HardcodedConfig": { - "description": "Hardcoded Config using access_key and secret. 
Prefer using DefaultChain instead.", - "type": "object", - "required": [ - "access_key_id", - "region", - "secret_access_key", - "service_name" - ], - "properties": { - "access_key_id": { - "description": "The ID for this access key.", - "type": "string" - }, - "assume_role": { - "description": "#/definitions/AssumeRoleProvider", - "$ref": "#/definitions/AssumeRoleProvider", - "nullable": true - }, - "region": { - "description": "The AWS region this chain applies to.", - "type": "string" - }, - "secret_access_key": { - "description": "The secret key used to sign requests.", - "type": "string" - }, - "service_name": { - "description": "The service you're trying to access, eg: \"s3\", \"vpc-lattice-svcs\", etc.", - "type": "string" - } - }, - "additionalProperties": false - }, - "ActiveRequestsAttributes": { - "type": "object", - "properties": { - "http.request.method": { - "description": "The HTTP request method", - "default": false, - "type": "boolean" - }, - "server.address": { - "description": "The server address", - "default": false, - "type": "boolean" - }, - "server.port": { - "description": "The server port", - "default": false, - "type": "boolean" - }, - "url.scheme": { - "description": "The URL scheme", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "All": { - "type": "string", - "enum": [ - "all" - ] - }, - "ApolloMetricsReferenceMode": { - "description": "Apollo usage report reference generation modes.", - "oneOf": [ - { - "description": "Use the extended mode to report input object fields and enum value references as well as object fields.", - "type": "string", - "enum": [ - "extended" - ] - }, - { - "description": "Use the standard mode that only reports referenced object fields.", - "type": "string", - "enum": [ - "standard" - ] - } - ] - }, - "ApolloSignatureNormalizationAlgorithm": { - "description": "Apollo usage report signature normalization algorithm", - "oneOf": [ - { - "description": "Use the algorithm that matches the JavaScript-based implementation.", - "type": "string", - "enum": [ - "legacy" - ] - }, - { - "description": "Use a new algorithm that includes input object forms, normalized aliases and variable names, and removes some edge cases from the JS implementation that affected normalization.", - "type": "string", - "enum": [ - "enhanced" - ] - } - ] - }, - "Apq": { - "description": "Automatic Persisted Queries (APQ) configuration", - "type": "object", - "properties": { - "enabled": { - "description": "Activates Automatic Persisted Queries (enabled by default)", - "default": true, - "type": "boolean" - }, - "router": { - "description": "#/definitions/Router", - "$ref": "#/definitions/Router" - }, - "subgraph": { - "description": "#/definitions/SubgraphConfiguration_for_SubgraphApq", - "$ref": "#/definitions/SubgraphConfiguration_for_SubgraphApq" - } - }, - "additionalProperties": false - }, - "AssumeRoleProvider": { - "description": "Specify assumed role configuration.", - "type": "object", - "required": [ - "role_arn", - "session_name" - ], - "properties": { - "external_id": { - "description": "Unique identifier that might be required when you assume a role in another account.", - "type": "string", - "nullable": true - }, - "role_arn": { - "description": "Amazon Resource Name (ARN) for the role assumed when making requests", - "type": "string" - }, - "session_name": { - "description": "Uniquely identify a session when the same role is assumed by different principals or for different reasons.", - "type": "string" - } - }, 
- "additionalProperties": false - }, - "AttributeArray": { - "anyOf": [ - { - "description": "Array of bools", - "type": "array", - "items": { - "type": "boolean" - } - }, - { - "description": "Array of integers", - "type": "array", - "items": { - "type": "integer", - "format": "int64" - } - }, - { - "description": "Array of floats", - "type": "array", - "items": { - "type": "number", - "format": "double" - } - }, - { - "description": "Array of strings", - "type": "array", - "items": { - "type": "string" - } - } - ] - }, - "AttributeValue": { - "anyOf": [ - { - "description": "bool values", - "type": "boolean" - }, - { - "description": "i64 values", - "type": "integer", - "format": "int64" - }, - { - "description": "f64 values", - "type": "number", - "format": "double" - }, - { - "description": "String values", - "type": "string" - }, - { - "description": "#/definitions/AttributeArray", - "$ref": "#/definitions/AttributeArray" - } - ] - }, - "AuthConfig": { - "oneOf": [ - { - "type": "object", - "required": [ - "aws_sig_v4" - ], - "properties": { - "aws_sig_v4": { - "description": "#/definitions/AWSSigV4Config", - "$ref": "#/definitions/AWSSigV4Config" - } - }, - "additionalProperties": false - } - ] - }, - "BatchProcessorConfig": { - "description": "Batch processor configuration", - "type": "object", - "properties": { - "max_concurrent_exports": { - "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task. The default is 1.", - "default": 1, - "type": "integer", - "format": "uint", - "minimum": 0.0 - }, - "max_export_batch_size": { - "description": "The maximum number of spans to process in a single batch. If there are more than one batch worth of spans then it processes multiple batches of spans one batch after the other without any delay. The default value is 512.", - "default": 512, - "type": "integer", - "format": "uint", - "minimum": 0.0 - }, - "max_export_timeout": { - "description": "The maximum duration to export a batch of data. The default value is 30 seconds.", - "default": { - "secs": 30, - "nanos": 0 - }, - "type": "string" - }, - "max_queue_size": { - "description": "The maximum queue size to buffer spans for delayed processing. If the queue gets full it drops the spans. The default value of is 2048.", - "default": 2048, - "type": "integer", - "format": "uint", - "minimum": 0.0 - }, - "scheduled_delay": { - "description": "The delay interval in milliseconds between two consecutive processing of batches. 
The default value is 5 seconds.", - "default": { - "secs": 5, - "nanos": 0 - }, - "type": "string" - } - } - }, - "Batching": { - "description": "Configuration for Batching", - "type": "object", - "required": [ - "mode" - ], - "properties": { - "enabled": { - "description": "Activates Batching (disabled by default)", - "default": false, - "type": "boolean" - }, - "maximum_size": { - "description": "Maximum size for a batch", - "default": null, - "type": "integer", - "format": "uint", - "minimum": 0.0, - "nullable": true - }, - "mode": { - "description": "#/definitions/BatchingMode", - "$ref": "#/definitions/BatchingMode" - }, - "subgraph": { - "description": "#/definitions/SubgraphConfiguration_for_CommonBatchingConfig", - "$ref": "#/definitions/SubgraphConfiguration_for_CommonBatchingConfig", - "nullable": true - } - }, - "additionalProperties": false - }, - "BatchingMode": { - "oneOf": [ - { - "description": "batch_http_link", - "type": "string", - "enum": [ - "batch_http_link" - ] - } - ] - }, - "CSRFConfig": { - "description": "CSRF protection configuration.\n\nSee for an explanation on CSRF attacks.", - "type": "object", - "properties": { - "required_headers": { - "description": "Override the headers to check for by setting custom_headers Note that if you set required_headers here, you may also want to have a look at your `CORS` configuration, and make sure you either: - did not set any `allow_headers` list (so it defaults to `mirror_request`) - added your required headers to the allow_headers list, as shown in the `examples/cors-and-csrf/custom-headers.router.yaml` files.", - "default": [ - "x-apollo-operation-name", - "apollo-require-preflight" - ], - "type": "array", - "items": { - "type": "string" - } - }, - "unsafe_disabled": { - "description": "The CSRF plugin is enabled by default.\n\nSetting `unsafe_disabled: true` *disables* CSRF protection.", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "Cache": { - "description": "Cache configuration", - "type": "object", - "properties": { - "in_memory": { - "description": "#/definitions/InMemoryCache", - "$ref": "#/definitions/InMemoryCache" - }, - "redis": { - "description": "#/definitions/RedisCache", - "$ref": "#/definitions/RedisCache", - "nullable": true - } - }, - "additionalProperties": false - }, - "CacheAttributes": { - "type": "object", - "properties": { - "graphql.type.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": false - }, - "CacheInstrumentsConfig": { - "type": "object", - "properties": { - "apollo.router.operations.entity.cache": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - } - }, - "additionalProperties": false - }, - "CacheKind": { - "type": "string", - "enum": [ - "hit", - "miss" - ] - }, - "CallbackMode": { - "description": "Using a callback url", - "type": "object", - "required": [ - "public_url" - ], - "properties": { - "heartbeat_interval": { - "description": "#/definitions/HeartbeatInterval", - "$ref": 
"#/definitions/HeartbeatInterval" - }, - "listen": { - "description": "#/definitions/ListenAddr", - "$ref": "#/definitions/ListenAddr", - "nullable": true - }, - "path": { - "description": "Specify on which path you want to listen for callbacks (default: /callback)", - "writeOnly": true, - "type": "string", - "nullable": true - }, - "public_url": { - "description": "URL used to access this router instance, including the path configured on the Router", - "type": "string" - }, - "subgraphs": { - "description": "Specify on which subgraph we enable the callback mode for subscription If empty it applies to all subgraphs (passthrough mode takes precedence)", - "default": [], - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "additionalProperties": false - }, - "Chaos": { - "description": "Configuration for chaos testing, trying to reproduce bugs that require uncommon conditions. You probably don’t want this in production!", - "type": "object", - "properties": { - "force_reload": { - "description": "Force a hot reload of the Router (as if the schema or configuration had changed) at a regular time interval.", - "default": null, - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "Client": { - "type": "object", - "properties": { - "dns_resolution_strategy": { - "description": "#/definitions/DnsResolutionStrategy", - "$ref": "#/definitions/DnsResolutionStrategy", - "nullable": true - }, - "experimental_http2": { - "description": "#/definitions/Http2Config", - "$ref": "#/definitions/Http2Config", - "nullable": true - } - }, - "additionalProperties": false - }, - "CommonBatchingConfig": { - "description": "Common options for configuring subgraph batching", - "type": "object", - "required": [ - "enabled" - ], - "properties": { - "enabled": { - "description": "Whether this batching config should be enabled", - "type": "boolean" - } - } - }, - "Compression": { - "oneOf": [ - { - "description": "gzip", - "type": "string", - "enum": [ - "gzip" - ] - }, - { - "description": "deflate", - "type": "string", - "enum": [ - "deflate" - ] - }, - { - "description": "brotli", - "type": "string", - "enum": [ - "br" - ] - }, - { - "description": "identity", - "type": "string", - "enum": [ - "identity" - ] - } - ] - }, - "Condition_for_ConnectorSelector": { - "oneOf": [ - { - "description": "A condition to check a selection against a value.", - "type": "object", - "required": [ - "eq" - ], - "properties": { - "eq": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_ConnectorSelector", - "$ref": "#/definitions/SelectorOrValue_for_ConnectorSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "The first selection must be greater than the second selection.", - "type": "object", - "required": [ - "gt" - ], - "properties": { - "gt": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_ConnectorSelector", - "$ref": "#/definitions/SelectorOrValue_for_ConnectorSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "The first selection must be less than the second selection.", - "type": "object", - "required": [ - "lt" - ], - "properties": { - "lt": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_ConnectorSelector", - "$ref": "#/definitions/SelectorOrValue_for_ConnectorSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - 
"additionalProperties": false - }, - { - "description": "A condition to check a selection against a selector.", - "type": "object", - "required": [ - "exists" - ], - "properties": { - "exists": { - "description": "#/definitions/ConnectorSelector", - "$ref": "#/definitions/ConnectorSelector" - } - }, - "additionalProperties": false - }, - { - "description": "All sub-conditions must be true.", - "type": "object", - "required": [ - "all" - ], - "properties": { - "all": { - "type": "array", - "items": { - "description": "#/definitions/Condition_for_ConnectorSelector", - "$ref": "#/definitions/Condition_for_ConnectorSelector" - } - } - }, - "additionalProperties": false - }, - { - "description": "At least one sub-conditions must be true.", - "type": "object", - "required": [ - "any" - ], - "properties": { - "any": { - "type": "array", - "items": { - "description": "#/definitions/Condition_for_ConnectorSelector", - "$ref": "#/definitions/Condition_for_ConnectorSelector" - } - } - }, - "additionalProperties": false - }, - { - "description": "The sub-condition must not be true", - "type": "object", - "required": [ - "not" - ], - "properties": { - "not": { - "description": "#/definitions/Condition_for_ConnectorSelector", - "$ref": "#/definitions/Condition_for_ConnectorSelector" - } - }, - "additionalProperties": false - }, - { - "description": "Static true condition", - "type": "string", - "enum": [ - "true" - ] - }, - { - "description": "Static false condition", - "type": "string", - "enum": [ - "false" - ] - } - ] - }, - "Condition_for_GraphQLSelector": { - "oneOf": [ - { - "description": "A condition to check a selection against a value.", - "type": "object", - "required": [ - "eq" - ], - "properties": { - "eq": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_GraphQLSelector", - "$ref": "#/definitions/SelectorOrValue_for_GraphQLSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "The first selection must be greater than the second selection.", - "type": "object", - "required": [ - "gt" - ], - "properties": { - "gt": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_GraphQLSelector", - "$ref": "#/definitions/SelectorOrValue_for_GraphQLSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "The first selection must be less than the second selection.", - "type": "object", - "required": [ - "lt" - ], - "properties": { - "lt": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_GraphQLSelector", - "$ref": "#/definitions/SelectorOrValue_for_GraphQLSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "A condition to check a selection against a selector.", - "type": "object", - "required": [ - "exists" - ], - "properties": { - "exists": { - "description": "#/definitions/GraphQLSelector", - "$ref": "#/definitions/GraphQLSelector" - } - }, - "additionalProperties": false - }, - { - "description": "All sub-conditions must be true.", - "type": "object", - "required": [ - "all" - ], - "properties": { - "all": { - "type": "array", - "items": { - "description": "#/definitions/Condition_for_GraphQLSelector", - "$ref": "#/definitions/Condition_for_GraphQLSelector" - } - } - }, - "additionalProperties": false - }, - { - "description": "At least one sub-conditions must be true.", - "type": "object", - "required": [ - "any" - 
], - "properties": { - "any": { - "type": "array", - "items": { - "description": "#/definitions/Condition_for_GraphQLSelector", - "$ref": "#/definitions/Condition_for_GraphQLSelector" - } - } - }, - "additionalProperties": false - }, - { - "description": "The sub-condition must not be true", - "type": "object", - "required": [ - "not" - ], - "properties": { - "not": { - "description": "#/definitions/Condition_for_GraphQLSelector", - "$ref": "#/definitions/Condition_for_GraphQLSelector" - } - }, - "additionalProperties": false - }, - { - "description": "Static true condition", - "type": "string", - "enum": [ - "true" - ] - }, - { - "description": "Static false condition", - "type": "string", - "enum": [ - "false" - ] - } - ] - }, - "Condition_for_RouterSelector": { - "oneOf": [ - { - "description": "A condition to check a selection against a value.", - "type": "object", - "required": [ - "eq" - ], - "properties": { - "eq": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_RouterSelector", - "$ref": "#/definitions/SelectorOrValue_for_RouterSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "The first selection must be greater than the second selection.", - "type": "object", - "required": [ - "gt" - ], - "properties": { - "gt": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_RouterSelector", - "$ref": "#/definitions/SelectorOrValue_for_RouterSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "The first selection must be less than the second selection.", - "type": "object", - "required": [ - "lt" - ], - "properties": { - "lt": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_RouterSelector", - "$ref": "#/definitions/SelectorOrValue_for_RouterSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "A condition to check a selection against a selector.", - "type": "object", - "required": [ - "exists" - ], - "properties": { - "exists": { - "description": "#/definitions/RouterSelector", - "$ref": "#/definitions/RouterSelector" - } - }, - "additionalProperties": false - }, - { - "description": "All sub-conditions must be true.", - "type": "object", - "required": [ - "all" - ], - "properties": { - "all": { - "type": "array", - "items": { - "description": "#/definitions/Condition_for_RouterSelector", - "$ref": "#/definitions/Condition_for_RouterSelector" - } - } - }, - "additionalProperties": false - }, - { - "description": "At least one sub-conditions must be true.", - "type": "object", - "required": [ - "any" - ], - "properties": { - "any": { - "type": "array", - "items": { - "description": "#/definitions/Condition_for_RouterSelector", - "$ref": "#/definitions/Condition_for_RouterSelector" - } - } - }, - "additionalProperties": false - }, - { - "description": "The sub-condition must not be true", - "type": "object", - "required": [ - "not" - ], - "properties": { - "not": { - "description": "#/definitions/Condition_for_RouterSelector", - "$ref": "#/definitions/Condition_for_RouterSelector" - } - }, - "additionalProperties": false - }, - { - "description": "Static true condition", - "type": "string", - "enum": [ - "true" - ] - }, - { - "description": "Static false condition", - "type": "string", - "enum": [ - "false" - ] - } - ] - }, - "Condition_for_SubgraphSelector": { - "oneOf": [ - { - "description": "A 
condition to check a selection against a value.", - "type": "object", - "required": [ - "eq" - ], - "properties": { - "eq": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_SubgraphSelector", - "$ref": "#/definitions/SelectorOrValue_for_SubgraphSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "The first selection must be greater than the second selection.", - "type": "object", - "required": [ - "gt" - ], - "properties": { - "gt": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_SubgraphSelector", - "$ref": "#/definitions/SelectorOrValue_for_SubgraphSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "The first selection must be less than the second selection.", - "type": "object", - "required": [ - "lt" - ], - "properties": { - "lt": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_SubgraphSelector", - "$ref": "#/definitions/SelectorOrValue_for_SubgraphSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "A condition to check a selection against a selector.", - "type": "object", - "required": [ - "exists" - ], - "properties": { - "exists": { - "description": "#/definitions/SubgraphSelector", - "$ref": "#/definitions/SubgraphSelector" - } - }, - "additionalProperties": false - }, - { - "description": "All sub-conditions must be true.", - "type": "object", - "required": [ - "all" - ], - "properties": { - "all": { - "type": "array", - "items": { - "description": "#/definitions/Condition_for_SubgraphSelector", - "$ref": "#/definitions/Condition_for_SubgraphSelector" - } - } - }, - "additionalProperties": false - }, - { - "description": "At least one sub-conditions must be true.", - "type": "object", - "required": [ - "any" - ], - "properties": { - "any": { - "type": "array", - "items": { - "description": "#/definitions/Condition_for_SubgraphSelector", - "$ref": "#/definitions/Condition_for_SubgraphSelector" - } - } - }, - "additionalProperties": false - }, - { - "description": "The sub-condition must not be true", - "type": "object", - "required": [ - "not" - ], - "properties": { - "not": { - "description": "#/definitions/Condition_for_SubgraphSelector", - "$ref": "#/definitions/Condition_for_SubgraphSelector" - } - }, - "additionalProperties": false - }, - { - "description": "Static true condition", - "type": "string", - "enum": [ - "true" - ] - }, - { - "description": "Static false condition", - "type": "string", - "enum": [ - "false" - ] - } - ] - }, - "Condition_for_SupergraphSelector": { - "oneOf": [ - { - "description": "A condition to check a selection against a value.", - "type": "object", - "required": [ - "eq" - ], - "properties": { - "eq": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_SupergraphSelector", - "$ref": "#/definitions/SelectorOrValue_for_SupergraphSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "The first selection must be greater than the second selection.", - "type": "object", - "required": [ - "gt" - ], - "properties": { - "gt": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_SupergraphSelector", - "$ref": "#/definitions/SelectorOrValue_for_SupergraphSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - 
"additionalProperties": false - }, - { - "description": "The first selection must be less than the second selection.", - "type": "object", - "required": [ - "lt" - ], - "properties": { - "lt": { - "type": "array", - "items": { - "description": "#/definitions/SelectorOrValue_for_SupergraphSelector", - "$ref": "#/definitions/SelectorOrValue_for_SupergraphSelector" - }, - "maxItems": 2, - "minItems": 2 - } - }, - "additionalProperties": false - }, - { - "description": "A condition to check a selection against a selector.", - "type": "object", - "required": [ - "exists" - ], - "properties": { - "exists": { - "description": "#/definitions/SupergraphSelector", - "$ref": "#/definitions/SupergraphSelector" - } - }, - "additionalProperties": false - }, - { - "description": "All sub-conditions must be true.", - "type": "object", - "required": [ - "all" - ], - "properties": { - "all": { - "type": "array", - "items": { - "description": "#/definitions/Condition_for_SupergraphSelector", - "$ref": "#/definitions/Condition_for_SupergraphSelector" - } - } - }, - "additionalProperties": false - }, - { - "description": "At least one sub-conditions must be true.", - "type": "object", - "required": [ - "any" - ], - "properties": { - "any": { - "type": "array", - "items": { - "description": "#/definitions/Condition_for_SupergraphSelector", - "$ref": "#/definitions/Condition_for_SupergraphSelector" - } - } - }, - "additionalProperties": false - }, - { - "description": "The sub-condition must not be true", - "type": "object", - "required": [ - "not" - ], - "properties": { - "not": { - "description": "#/definitions/Condition_for_SupergraphSelector", - "$ref": "#/definitions/Condition_for_SupergraphSelector" - } - }, - "additionalProperties": false - }, - { - "description": "Static true condition", - "type": "string", - "enum": [ - "true" - ] - }, - { - "description": "Static false condition", - "type": "string", - "enum": [ - "false" - ] - } - ] - }, - "Conf": { - "description": "Authentication", - "type": "object", - "properties": { - "connector": { - "description": "#/definitions/Config6", - "$ref": "#/definitions/Config6", - "nullable": true - }, - "router": { - "description": "#/definitions/RouterConf", - "$ref": "#/definitions/RouterConf", - "nullable": true - }, - "subgraph": { - "description": "#/definitions/Config5", - "$ref": "#/definitions/Config5", - "nullable": true - } - }, - "additionalProperties": false - }, - "Conf2": { - "description": "Authorization plugin", - "type": "object", - "properties": { - "directives": { - "description": "#/definitions/Directives", - "$ref": "#/definitions/Directives" - }, - "require_authentication": { - "description": "Reject unauthenticated requests", - "default": false, - "type": "boolean" - } - } - }, - "Conf3": { - "description": "Configures the externalization plugin", - "type": "object", - "required": [ - "url" - ], - "properties": { - "client": { - "description": "#/definitions/Client", - "$ref": "#/definitions/Client", - "nullable": true - }, - "execution": { - "description": "#/definitions/ExecutionStage", - "$ref": "#/definitions/ExecutionStage" - }, - "router": { - "description": "#/definitions/RouterStage", - "$ref": "#/definitions/RouterStage" - }, - "subgraph": { - "description": "#/definitions/SubgraphStages", - "$ref": "#/definitions/SubgraphStages" - }, - "supergraph": { - "description": "#/definitions/SupergraphStage", - "$ref": "#/definitions/SupergraphStage" - }, - "timeout": { - "description": "The timeout for external requests", - "default": { - 
"secs": 1, - "nanos": 0 - }, - "type": "string" - }, - "url": { - "description": "The url you'd like to offload processing to", - "type": "string" - } - }, - "additionalProperties": false - }, - "Conf4": { - "type": "object" - }, - "Conf5": { - "description": "Subgraph URL mappings", - "anyOf": [ - { - "description": "Subgraph URL mappings", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - ] - }, - "Conf6": { - "description": "Configuration for the Rhai Plugin", - "type": "object", - "properties": { - "main": { - "description": "The main entry point for Rhai script evaluation", - "type": "string", - "nullable": true - }, - "scripts": { - "description": "The directory where Rhai scripts can be found", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "Conf7": { - "description": "Telemetry configuration", - "type": "object", - "properties": { - "apollo": { - "description": "#/definitions/Config12", - "$ref": "#/definitions/Config12" - }, - "exporters": { - "description": "#/definitions/Exporters", - "$ref": "#/definitions/Exporters" - }, - "instrumentation": { - "description": "#/definitions/Instrumentation", - "$ref": "#/definitions/Instrumentation" - } - }, - "additionalProperties": false - }, - "Config": { - "description": "Configuration options pertaining to the health component.", - "type": "object", - "properties": { - "enabled": { - "description": "Set to false to disable the health check", - "default": true, - "type": "boolean" - }, - "listen": { - "description": "#/definitions/ListenAddr", - "$ref": "#/definitions/ListenAddr" - }, - "path": { - "description": "Optionally set a custom healthcheck path Defaults to /health", - "default": "/health", - "type": "string" - }, - "readiness": { - "description": "#/definitions/ReadinessConfig", - "$ref": "#/definitions/ReadinessConfig" - } - }, - "additionalProperties": false - }, - "Config10": { - "description": "Configuration for entity caching", - "type": "object", - "required": [ - "subgraph" - ], - "properties": { - "enabled": { - "description": "Enable or disable the entity caching feature", - "default": false, - "type": "boolean" - }, - "expose_keys_in_context": { - "description": "Expose cache keys in context", - "default": false, - "type": "boolean" - }, - "invalidation": { - "description": "#/definitions/InvalidationEndpointConfig", - "$ref": "#/definitions/InvalidationEndpointConfig", - "nullable": true - }, - "metrics": { - "description": "#/definitions/Metrics", - "$ref": "#/definitions/Metrics" - }, - "subgraph": { - "description": "#/definitions/SubgraphConfiguration_for_Subgraph", - "$ref": "#/definitions/SubgraphConfiguration_for_Subgraph" - } - }, - "additionalProperties": false - }, - "Config11": { - "description": "Configuration for the progressive override plugin", - "type": "object" - }, - "Config12": { - "type": "object", - "properties": { - "batch_processor": { - "description": "#/definitions/BatchProcessorConfig", - "$ref": "#/definitions/BatchProcessorConfig" - }, - "buffer_size": { - "description": "The buffer size for sending traces to Apollo. 
Increase this if you are experiencing lost traces.", - "default": 10000, - "type": "integer", - "format": "uint", - "minimum": 1.0 - }, - "client_name_header": { - "description": "The name of the header to extract from requests when populating 'client name' for traces and metrics in Apollo Studio.", - "default": "apollographql-client-name", - "type": "string", - "nullable": true - }, - "client_version_header": { - "description": "The name of the header to extract from requests when populating 'client version' for traces and metrics in Apollo Studio.", - "default": "apollographql-client-version", - "type": "string", - "nullable": true - }, - "endpoint": { - "description": "The Apollo Studio endpoint for exporting traces and metrics.", - "default": "https://usage-reporting.api.apollographql.com/api/ingress/traces", - "type": "string" - }, - "errors": { - "description": "#/definitions/ErrorsConfiguration", - "$ref": "#/definitions/ErrorsConfiguration" - }, - "experimental_local_field_metrics": { - "description": "Enable field metrics that are generated without FTV1 to be sent to Apollo Studio.", - "default": false, - "type": "boolean" - }, - "experimental_otlp_endpoint": { - "description": "The Apollo Studio endpoint for exporting traces and metrics.", - "default": "https://usage-reporting.api.apollographql.com/", - "type": "string" - }, - "experimental_otlp_tracing_protocol": { - "description": "#/definitions/Protocol", - "$ref": "#/definitions/Protocol" - }, - "field_level_instrumentation_sampler": { - "description": "#/definitions/SamplerOption", - "$ref": "#/definitions/SamplerOption" - }, - "metrics_reference_mode": { - "description": "#/definitions/ApolloMetricsReferenceMode", - "$ref": "#/definitions/ApolloMetricsReferenceMode" - }, - "otlp_tracing_sampler": { - "description": "#/definitions/SamplerOption", - "$ref": "#/definitions/SamplerOption" - }, - "send_headers": { - "description": "#/definitions/ForwardHeaders", - "$ref": "#/definitions/ForwardHeaders" - }, - "send_variable_values": { - "description": "#/definitions/ForwardValues", - "$ref": "#/definitions/ForwardValues" - }, - "signature_normalization_algorithm": { - "description": "#/definitions/ApolloSignatureNormalizationAlgorithm", - "$ref": "#/definitions/ApolloSignatureNormalizationAlgorithm" - } - }, - "additionalProperties": false - }, - "Config13": { - "type": "object", - "required": [ - "enabled" - ], - "properties": { - "batch_processor": { - "description": "#/definitions/BatchProcessorConfig", - "$ref": "#/definitions/BatchProcessorConfig" - }, - "enabled": { - "description": "Enable otlp", - "type": "boolean" - }, - "endpoint": { - "description": "#/definitions/UriEndpoint", - "$ref": "#/definitions/UriEndpoint" - }, - "grpc": { - "description": "#/definitions/GrpcExporter", - "$ref": "#/definitions/GrpcExporter" - }, - "http": { - "description": "#/definitions/HttpExporter", - "$ref": "#/definitions/HttpExporter" - }, - "protocol": { - "description": "#/definitions/Protocol", - "$ref": "#/definitions/Protocol" - }, - "temporality": { - "description": "#/definitions/Temporality", - "$ref": "#/definitions/Temporality" - } - }, - "additionalProperties": false - }, - "Config14": { - "description": "Prometheus configuration", - "type": "object", - "properties": { - "enabled": { - "description": "Set to true to enable", - "default": false, - "type": "boolean" - }, - "listen": { - "description": "#/definitions/ListenAddr", - "$ref": "#/definitions/ListenAddr" - }, - "path": { - "description": "The path where prometheus 
will be exposed", - "default": "/metrics", - "type": "string" - } - }, - "additionalProperties": false - }, - "Config15": { - "type": "object", - "required": [ - "enabled" - ], - "properties": { - "batch_processor": { - "description": "#/definitions/BatchProcessorConfig", - "$ref": "#/definitions/BatchProcessorConfig" - }, - "enabled": { - "description": "Enable zipkin", - "type": "boolean" - }, - "endpoint": { - "description": "#/definitions/UriEndpoint", - "$ref": "#/definitions/UriEndpoint" - } - }, - "additionalProperties": false - }, - "Config16": { - "type": "object", - "required": [ - "enabled" - ], - "properties": { - "batch_processor": { - "description": "#/definitions/BatchProcessorConfig", - "$ref": "#/definitions/BatchProcessorConfig" - }, - "enable_span_mapping": { - "description": "Enable datadog span mapping for span name and resource name.", - "default": true, - "type": "boolean" - }, - "enabled": { - "description": "Enable datadog", - "type": "boolean" - }, - "endpoint": { - "description": "#/definitions/UriEndpoint", - "$ref": "#/definitions/UriEndpoint" - }, - "fixed_span_names": { - "description": "Fixes the span names, this means that the APM view will show the original span names in the operation dropdown.", - "default": true, - "type": "boolean" - }, - "resource_mapping": { - "description": "Custom mapping to be used as the resource field in spans, defaults to: router -> http.route supergraph -> graphql.operation.name query_planning -> graphql.operation.name subgraph -> subgraph.name subgraph_request -> subgraph.name http_request -> http.route", - "default": {}, - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "span_metrics": { - "description": "Which spans will be eligible for span stats to be collected for viewing in the APM view. 
Defaults to true for `request`, `router`, `query_parsing`, `supergraph`, `execution`, `query_planning`, `subgraph`, `subgraph_request`, `connect`, `connect_request` and `http_request`.", - "default": { - "parse_query": true, - "connect": true, - "connect_request": true, - "subgraph_request": true, - "query_planning": true, - "request": true, - "subgraph": true, - "supergraph": true, - "http_request": true, - "router": true, - "execution": true - }, - "type": "object", - "additionalProperties": { - "type": "boolean" - } - } - }, - "additionalProperties": false - }, - "Config17": { - "description": "Configuration for the experimental traffic shaping plugin", - "type": "object", - "properties": { - "all": { - "description": "#/definitions/SubgraphShaping", - "$ref": "#/definitions/SubgraphShaping", - "nullable": true - }, - "connector": { - "description": "#/definitions/ConnectorsShapingConfig", - "$ref": "#/definitions/ConnectorsShapingConfig" - }, - "deduplicate_variables": { - "description": "DEPRECATED, now always enabled: Enable variable deduplication optimization when sending requests to subgraphs (https://github.com/apollographql/router/issues/87)", - "default": null, - "type": "boolean", - "nullable": true - }, - "router": { - "description": "#/definitions/RouterShaping", - "$ref": "#/definitions/RouterShaping", - "nullable": true - }, - "subgraphs": { - "description": "Applied on specific subgraphs", - "type": "object", - "additionalProperties": { - "description": "#/definitions/SubgraphShaping", - "$ref": "#/definitions/SubgraphShaping" - } - } - }, - "additionalProperties": false - }, - "Config2": { - "description": "Configuration for operation limits, parser limits, HTTP limits, etc.", - "type": "object", - "properties": { - "http1_max_request_buf_size": { - "description": "Limit the maximum buffer size for the HTTP1 connection.\n\nDefault is ~400kib.", - "default": null, - "type": "string", - "nullable": true - }, - "http1_max_request_headers": { - "description": "Limit the maximum number of headers of incoming HTTP1 requests. Default is 100.\n\nIf router receives more headers than the buffer size, it responds to the client with \"431 Request Header Fields Too Large\".", - "default": null, - "type": "integer", - "format": "uint", - "minimum": 0.0, - "nullable": true - }, - "http_max_request_bytes": { - "description": "Limit the size of incoming HTTP requests read from the network, to protect against running out of memory. Default: 2000000 (2 MB)", - "default": 2000000, - "type": "integer", - "format": "uint", - "minimum": 0.0 - }, - "introspection_max_depth": { - "description": "Limit the depth of nested list fields in introspection queries to protect avoid generating huge responses. Returns a GraphQL error with `{ message: \"Maximum introspection depth exceeded\" }` when nested fields exceed the limit. 
Default: true", - "default": true, - "type": "boolean" - }, - "max_aliases": { - "description": "If set, requests with operations with more aliases than this maximum are rejected with a HTTP 400 Bad Request response and GraphQL error with `\"extensions\": {\"code\": \"MAX_ALIASES_LIMIT\"}`", - "default": null, - "type": "integer", - "format": "uint32", - "minimum": 0.0, - "nullable": true - }, - "max_depth": { - "description": "If set, requests with operations deeper than this maximum are rejected with a HTTP 400 Bad Request response and GraphQL error with `\"extensions\": {\"code\": \"MAX_DEPTH_LIMIT\"}`\n\nCounts depth of an operation, looking at its selection sets,˛ including fields in fragments and inline fragments. The following example has a depth of 3.\n\n```graphql query getProduct { book { # 1 ...bookDetails } }\n\nfragment bookDetails on Book { details { # 2 ... on ProductDetailsBook { country # 3 } } } ```", - "default": null, - "type": "integer", - "format": "uint32", - "minimum": 0.0, - "nullable": true - }, - "max_height": { - "description": "If set, requests with operations higher than this maximum are rejected with a HTTP 400 Bad Request response and GraphQL error with `\"extensions\": {\"code\": \"MAX_DEPTH_LIMIT\"}`\n\nHeight is based on simple merging of fields using the same name or alias, but only within the same selection set. For example `name` here is only counted once and the query has height 3, not 4:\n\n```graphql query { name { first } name { last } } ```\n\nThis may change in a future version of Apollo Router to do [full field merging across fragments][merging] instead.\n\n[merging]: https://spec.graphql.org/October2021/#sec-Field-Selection-Merging]", - "default": null, - "type": "integer", - "format": "uint32", - "minimum": 0.0, - "nullable": true - }, - "max_root_fields": { - "description": "If set, requests with operations with more root fields than this maximum are rejected with a HTTP 400 Bad Request response and GraphQL error with `\"extensions\": {\"code\": \"MAX_ROOT_FIELDS_LIMIT\"}`\n\nThis limit counts only the top level fields in a selection set, including fragments and inline fragments.", - "default": null, - "type": "integer", - "format": "uint32", - "minimum": 0.0, - "nullable": true - }, - "parser_max_recursion": { - "description": "Limit recursion in the GraphQL parser to protect against stack overflow. default: 500", - "default": 500, - "type": "integer", - "format": "uint", - "minimum": 0.0 - }, - "parser_max_tokens": { - "description": "Limit the number of tokens the GraphQL parser processes before aborting.", - "default": 15000, - "type": "integer", - "format": "uint", - "minimum": 0.0 - }, - "warn_only": { - "description": "If set to true (which is the default is dev mode), requests that exceed a `max_*` limit are *not* rejected. 
Instead they are executed normally, and a warning is logged.", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "Config3": { - "description": "This is a broken plugin for testing purposes only.", - "type": "object", - "required": [ - "enabled" - ], - "properties": { - "enabled": { - "description": "Enable the broken plugin.", - "type": "boolean" - } - } - }, - "Config4": { - "description": "Restricted plugin (for testing purposes only)", - "type": "object", - "required": [ - "enabled" - ], - "properties": { - "enabled": { - "description": "Enable the restricted plugin (for testing purposes only)", - "type": "boolean" - } - } - }, - "Config5": { - "description": "Configure subgraph authentication", - "type": "object", - "properties": { - "all": { - "description": "#/definitions/AuthConfig", - "$ref": "#/definitions/AuthConfig", - "nullable": true - }, - "subgraphs": { - "description": "Create a configuration that will apply only to a specific subgraph.", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/AuthConfig", - "$ref": "#/definitions/AuthConfig" - } - } - }, - "additionalProperties": false - }, - "Config6": { - "description": "Configure connector authentication", - "type": "object", - "properties": { - "sources": { - "description": "Create a configuration that will apply only to a specific source.", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/AuthConfig", - "$ref": "#/definitions/AuthConfig" - } - } - }, - "additionalProperties": false - }, - "Config7": { - "type": "object" - }, - "Config8": { - "description": "Configuration for header propagation", - "type": "object", - "properties": { - "all": { - "description": "#/definitions/HeadersLocation", - "$ref": "#/definitions/HeadersLocation", - "nullable": true - }, - "connector": { - "description": "#/definitions/ConnectorHeadersConfiguration", - "$ref": "#/definitions/ConnectorHeadersConfiguration" - }, - "subgraphs": { - "description": "Rules to specific subgraphs", - "type": "object", - "additionalProperties": { - "description": "#/definitions/HeadersLocation", - "$ref": "#/definitions/HeadersLocation" - } - } - }, - "additionalProperties": false - }, - "Config9": { - "description": "Configuration for exposing errors that originate from subgraphs", - "type": "object", - "properties": { - "all": { - "description": "#/definitions/ErrorMode", - "$ref": "#/definitions/ErrorMode" - }, - "subgraphs": { - "description": "Overrides global configuration on a per-subgraph basis", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/SubgraphConfig", - "$ref": "#/definitions/SubgraphConfig" - } - } - }, - "additionalProperties": false - }, - "ConnectorAttributes": { - "type": "object", - "properties": { - "connector.http.method": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "connector.source.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "connector.url.template": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": false - }, - "ConnectorConfiguration_for_TlsClient": 
{ - "type": "object", - "properties": { - "all": { - "description": "#/definitions/TlsClient", - "$ref": "#/definitions/TlsClient" - }, - "sources": { - "description": "Map of subgraph_name.connector_source_name to configuration", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/TlsClient", - "$ref": "#/definitions/TlsClient" - } - } - } - }, - "ConnectorEventsConfig": { - "type": "object", - "properties": { - "error": { - "description": "#/definitions/StandardEventConfig_for_ConnectorSelector", - "$ref": "#/definitions/StandardEventConfig_for_ConnectorSelector" - }, - "request": { - "description": "#/definitions/StandardEventConfig_for_ConnectorSelector", - "$ref": "#/definitions/StandardEventConfig_for_ConnectorSelector" - }, - "response": { - "description": "#/definitions/StandardEventConfig_for_ConnectorSelector", - "$ref": "#/definitions/StandardEventConfig_for_ConnectorSelector" - } - }, - "additionalProperties": false - }, - "ConnectorHeadersConfiguration": { - "type": "object", - "properties": { - "all": { - "description": "#/definitions/HeadersLocation", - "$ref": "#/definitions/HeadersLocation", - "nullable": true - }, - "sources": { - "description": "Map of subgraph_name.connector_source_name to configuration", - "type": "object", - "additionalProperties": { - "description": "#/definitions/HeadersLocation", - "$ref": "#/definitions/HeadersLocation" - } - } - }, - "additionalProperties": false - }, - "ConnectorInstrumentsConfig": { - "type": "object", - "properties": { - "http.client.request.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector" - }, - "http.client.request.duration": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector" - }, - "http.client.response.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector" - } - }, - "additionalProperties": false - }, - "ConnectorSelector": { - "anyOf": [ - { - "type": "object", - "required": [ - "subgraph_name" - ], - "properties": { - "subgraph_name": { - "description": "The subgraph name", - "type": "boolean" - } - }, - "additionalProperties": false - }, - { - "type": "object", - 
"required": [ - "connector_source" - ], - "properties": { - "connector_source": { - "description": "#/definitions/ConnectorSource", - "$ref": "#/definitions/ConnectorSource" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "connector_http_request_header" - ], - "properties": { - "connector_http_request_header": { - "description": "The name of a connector HTTP request header.", - "type": "string" - }, - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "connector_http_response_header" - ], - "properties": { - "connector_http_response_header": { - "description": "The name of a connector HTTP response header.", - "type": "string" - }, - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "connector_http_response_status" - ], - "properties": { - "connector_http_response_status": { - "description": "#/definitions/ResponseStatus", - "$ref": "#/definitions/ResponseStatus" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "connector_http_method" - ], - "properties": { - "connector_http_method": { - "description": "The connector HTTP method.", - "type": "boolean" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "connector_url_template" - ], - "properties": { - "connector_url_template": { - "description": "The connector URL template.", - "type": "boolean" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "static" - ], - "properties": { - "static": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "error" - ], - "properties": { - "error": { - "description": "#/definitions/ErrorRepr", - "$ref": "#/definitions/ErrorRepr" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "connector_request_mapping_problems" - ], - "properties": { - "connector_request_mapping_problems": { - "description": "#/definitions/MappingProblems", - "$ref": "#/definitions/MappingProblems" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "connector_response_mapping_problems" - ], - "properties": { - "connector_response_mapping_problems": { - "description": "#/definitions/MappingProblems", - "$ref": "#/definitions/MappingProblems" - } - }, - "additionalProperties": false - } - ] - }, - "ConnectorShaping": { - "type": "object", - "properties": { - "compression": { - "description": "#/definitions/Compression", - "$ref": "#/definitions/Compression", - "nullable": true - }, - "dns_resolution_strategy": { - "description": "#/definitions/DnsResolutionStrategy", - "$ref": "#/definitions/DnsResolutionStrategy", - "nullable": true - }, - "experimental_http2": { - "description": "#/definitions/Http2Config", - "$ref": "#/definitions/Http2Config", - "nullable": true - }, - "global_rate_limit": { - "description": "#/definitions/RateLimitConf", - "$ref": "#/definitions/RateLimitConf", - "nullable": true - }, - "timeout": { - "description": "Enable timeout for connectors requests", - "default": null, - "type": "string" - } - }, - "additionalProperties": false - }, - "ConnectorSource": { - "oneOf": [ - { - "description": "The name of 
the connector source.", - "type": "string", - "enum": [ - "name" - ] - } - ] - }, - "ConnectorSpans": { - "type": "object", - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional" - } - }, - "additionalProperties": false - }, - "ConnectorValue": { - "anyOf": [ - { - "description": "#/definitions/Standard", - "$ref": "#/definitions/Standard" - }, - { - "description": "#/definitions/ConnectorSelector", - "$ref": "#/definitions/ConnectorSelector" - } - ] - }, - "ConnectorsConfig": { - "type": "object", - "properties": { - "debug_extensions": { - "description": "Enables connector debugging information on response extensions if the feature is enabled", - "default": false, - "type": "boolean" - }, - "expose_sources_in_context": { - "description": "When enabled, adds an entry to the context for use in coprocessors ```json { \"context\": { \"entries\": { \"apollo_connectors::sources_in_query_plan\": [ { \"subgraph_name\": \"subgraph\", \"source_name\": \"source\" } ] } } } ```", - "default": false, - "type": "boolean" - }, - "max_requests_per_operation_per_source": { - "description": "The maximum number of requests for a connector source", - "default": null, - "type": "integer", - "format": "uint", - "minimum": 0.0, - "nullable": true - }, - "preview_connect_v0_2": { - "description": "Feature gate for connect spec v0.2. Set to `true` to enable the using the v0.2 spec during the preview phase.", - "default": null, - "type": "boolean", - "nullable": true - }, - "sources": { - "description": "Map of subgraph_name.connector_source_name to source configuration", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/SourceConfiguration", - "$ref": "#/definitions/SourceConfiguration" - } - }, - "subgraphs": { - "description": "A map of subgraph name to connectors config for that subgraph", - "default": {}, - "deprecated": true, - "type": "object", - "additionalProperties": { - "description": "#/definitions/SubgraphConnectorConfiguration", - "$ref": "#/definitions/SubgraphConnectorConfiguration" - } - } - }, - "additionalProperties": false - }, - "ConnectorsShapingConfig": { - "type": "object", - "properties": { - "all": { - "description": "#/definitions/ConnectorShaping", - "$ref": "#/definitions/ConnectorShaping", - "nullable": true - }, - "sources": { - "description": "Applied on specific connector sources", - "type": "object", - "additionalProperties": { - "description": "#/definitions/ConnectorShaping", - "$ref": "#/definitions/ConnectorShaping" - } - } - }, - "additionalProperties": false - }, - "ContextConf": { - "description": "Configures the context", - "anyOf": [ - { - "description": "Deprecated configuration using a boolean", - "type": "boolean" - }, - { - "description": "#/definitions/NewContextConf", - "$ref": "#/definitions/NewContextConf" - } - ] - }, - "Cors": { - "description": "Cross origin request configuration.", - "type": "object", - "properties": { - "allow_any_origin": { - "description": "Set to true to allow any origin.\n\nDefaults to false Having this set to true is the only way to allow Origin: null.", - "default": false, - "type": 
"boolean" - }, - "allow_credentials": { - "description": "Set to true to add the `Access-Control-Allow-Credentials` header.", - "default": false, - "type": "boolean" - }, - "allow_headers": { - "description": "The headers to allow.\n\nIf this value is not set, the router will mirror client's `Access-Control-Request-Headers`.\n\nNote that if you set headers here, you also want to have a look at your `CSRF` plugins configuration, and make sure you either: - accept `x-apollo-operation-name` AND / OR `apollo-require-preflight` - defined `csrf` required headers in your yml configuration, as shown in the `examples/cors-and-csrf/custom-headers.router.yaml` files.", - "default": [], - "type": "array", - "items": { - "type": "string" - } - }, - "expose_headers": { - "description": "Which response headers should be made available to scripts running in the browser, in response to a cross-origin request.", - "default": null, - "type": "array", - "items": { - "type": "string" - }, - "nullable": true - }, - "match_origins": { - "description": "`Regex`es you want to match the origins against to determine if they're allowed. Defaults to an empty list. Note that `origins` will be evaluated before `match_origins`", - "default": null, - "type": "array", - "items": { - "type": "string" - }, - "nullable": true - }, - "max_age": { - "description": "The `Access-Control-Max-Age` header value in time units", - "default": null, - "type": "string" - }, - "methods": { - "description": "Allowed request methods. Defaults to GET, POST, OPTIONS.", - "default": [ - "GET", - "POST", - "OPTIONS" - ], - "type": "array", - "items": { - "type": "string" - } - }, - "origins": { - "description": "The origin(s) to allow requests from. Defaults to `https://studio.apollographql.com/` for Apollo Studio.", - "default": [ - "https://studio.apollographql.com" - ], - "type": "array", - "items": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "CostValue": { - "oneOf": [ - { - "description": "The estimated cost of the operation using the currently configured cost model", - "type": "string", - "enum": [ - "estimated" - ] - }, - { - "description": "The actual cost of the operation using the currently configured cost model", - "type": "string", - "enum": [ - "actual" - ] - }, - { - "description": "The delta between the estimated and actual cost of the operation using the currently configured cost model", - "type": "string", - "enum": [ - "delta" - ] - }, - { - "description": "The result of the cost calculation. 
This is the error code returned by the cost calculation.", - "type": "string", - "enum": [ - "result" - ] - } - ] - }, - "DefaultAttributeRequirementLevel": { - "oneOf": [ - { - "description": "No default attributes set on spans, you have to set it one by one in the configuration to enable some attributes", - "type": "string", - "enum": [ - "none" - ] - }, - { - "description": "Attributes that are marked as required in otel semantic conventions and apollo documentation will be included (default)", - "type": "string", - "enum": [ - "required" - ] - }, - { - "description": "Attributes that are marked as required or recommended in otel semantic conventions and apollo documentation will be included", - "type": "string", - "enum": [ - "recommended" - ] - } - ] - }, - "DefaultChainConfig": { - "description": "Configuration of the DefaultChainProvider", - "type": "object", - "required": [ - "region", - "service_name" - ], - "properties": { - "assume_role": { - "description": "#/definitions/AssumeRoleProvider", - "$ref": "#/definitions/AssumeRoleProvider", - "nullable": true - }, - "profile_name": { - "description": "The profile name used by this provider", - "type": "string", - "nullable": true - }, - "region": { - "description": "The AWS region this chain applies to.", - "type": "string" - }, - "service_name": { - "description": "The service you're trying to access, eg: \"s3\", \"vpc-lattice-svcs\", etc.", - "type": "string" - } - }, - "additionalProperties": false - }, - "DefaultedStandardInstrument_for_ActiveRequestsAttributes": { - "anyOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "object", - "required": [ - "attributes" - ], - "properties": { - "attributes": { - "description": "#/definitions/ActiveRequestsAttributes", - "$ref": "#/definitions/ActiveRequestsAttributes" - } - }, - "additionalProperties": false - } - ] - }, - "DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector": { - "anyOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "object", - "required": [ - "attributes" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - } - }, - "additionalProperties": false - } - ] - }, - "DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector": { - "anyOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "object", - "required": [ - "attributes" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector", - "$ref": 
"#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector" - } - }, - "additionalProperties": false - } - ] - }, - "DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector": { - "anyOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "object", - "required": [ - "attributes" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector" - } - }, - "additionalProperties": false - } - ] - }, - "DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector": { - "anyOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "object", - "required": [ - "attributes" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector" - } - }, - "additionalProperties": false - } - ] - }, - "DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector": { - "anyOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "object", - "required": [ - "attributes" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - } - }, - "additionalProperties": false - } - ] - }, - "DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector": { - "anyOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "object", - "required": [ - "attributes" - ], - "properties": { - "attributes": { - "description": 
"#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector" - } - }, - "additionalProperties": false - } - ] - }, - "DemandControlConfig": { - "description": "Demand control configuration", - "type": "object", - "required": [ - "enabled", - "mode", - "strategy" - ], - "properties": { - "enabled": { - "description": "Enable demand control", - "type": "boolean" - }, - "mode": { - "description": "#/definitions/Mode", - "$ref": "#/definitions/Mode" - }, - "strategy": { - "description": "#/definitions/StrategyConfig", - "$ref": "#/definitions/StrategyConfig" - } - }, - "additionalProperties": false - }, - "Directives": { - "type": "object", - "properties": { - "dry_run": { - "description": "generates the authorization error messages without modying the query", - "default": false, - "type": "boolean" - }, - "enabled": { - "description": "enables the `@authenticated` and `@requiresScopes` directives", - "default": true, - "type": "boolean" - }, - "errors": { - "description": "#/definitions/ErrorConfig", - "$ref": "#/definitions/ErrorConfig" - }, - "reject_unauthorized": { - "description": "refuse a query entirely if any part would be filtered", - "default": false, - "type": "boolean" - } - } - }, - "Disabled": { - "type": "string", - "enum": [ - "disabled" - ] - }, - "DisplayTraceIdFormat": { - "anyOf": [ - { - "description": "#/definitions/TraceIdFormat", - "$ref": "#/definitions/TraceIdFormat" - }, - { - "type": "boolean" - } - ] - }, - "DnsResolutionStrategy": { - "oneOf": [ - { - "description": "Only query for `A` (IPv4) records", - "type": "string", - "enum": [ - "ipv4_only" - ] - }, - { - "description": "Only query for `AAAA` (IPv6) records", - "type": "string", - "enum": [ - "ipv6_only" - ] - }, - { - "description": "Query for both `A` (IPv4) and `AAAA` (IPv6) records in parallel", - "type": "string", - "enum": [ - "ipv4_and_ipv6" - ] - }, - { - "description": "Query for `AAAA` (IPv6) records first; if that fails, query for `A` (IPv4) records", - "type": "string", - "enum": [ - "ipv6_then_ipv4" - ] - }, - { - "description": "Default: Query for `A` (IPv4) records first; if that fails, query for `AAAA` (IPv6) records", - "type": "string", - "enum": [ - "ipv4_then_ipv6" - ] - } - ] - }, - "Enabled": { - "type": "string", - "enum": [ - "enabled" - ] - }, - "EntityType": { - "anyOf": [ - { - "description": "#/definitions/All", - "$ref": "#/definitions/All" - }, - { - "type": "string" - } - ] - }, - "ErrorConfig": { - "type": "object", - "properties": { - "log": { - "description": "log authorization errors", - "default": true, - "type": "boolean" - }, - "response": { - "description": "#/definitions/ErrorLocation", - "$ref": "#/definitions/ErrorLocation" - } - } - }, - "ErrorConfiguration": { - "type": "object", - "properties": { - "redact": { - "description": "Redact subgraph errors to Apollo Studio", - "default": true, - "type": "boolean" - }, - "redaction_policy": { - "description": "#/definitions/ErrorRedactionPolicy", - "$ref": "#/definitions/ErrorRedactionPolicy" - }, - "send": { - "description": "Send subgraph errors to Apollo Studio", - "default": true, - "type": "boolean" - } - }, - "additionalProperties": 
false - }, - "ErrorLocation": { - "oneOf": [ - { - "description": "store authorization errors in the response errors", - "type": "string", - "enum": [ - "errors" - ] - }, - { - "description": "store authorization errors in the response extensions", - "type": "string", - "enum": [ - "extensions" - ] - }, - { - "description": "do not add the authorization errors to the GraphQL response", - "type": "string", - "enum": [ - "disabled" - ] - } - ] - }, - "ErrorMode": { - "anyOf": [ - { - "description": "When `true`, Propagate the original error as is. Otherwise, redact it.", - "type": "boolean" - }, - { - "description": "Allow specific extension keys with required redact_message", - "type": "object", - "required": [ - "allow_extensions_keys", - "redact_message" - ], - "properties": { - "allow_extensions_keys": { - "description": "Allow specific extension keys", - "type": "array", - "items": { - "type": "string" - } - }, - "redact_message": { - "description": "redact error messages for all subgraphs", - "type": "boolean" - } - } - }, - { - "description": "Deny specific extension keys with required redact_message", - "type": "object", - "required": [ - "deny_extensions_keys", - "redact_message" - ], - "properties": { - "deny_extensions_keys": { - "description": "Deny specific extension keys", - "type": "array", - "items": { - "type": "string" - } - }, - "redact_message": { - "description": "redact error messages for all subgraphs", - "type": "boolean" - } - } - } - ] - }, - "ErrorRedactionPolicy": { - "description": "Allow some error fields to be send to Apollo Studio even when `redact` is true.", - "oneOf": [ - { - "description": "Applies redaction to all error details.", - "type": "string", - "enum": [ - "strict" - ] - }, - { - "description": "Modifies the `redact` setting by excluding the `extensions.code` field in errors from redaction.", - "type": "string", - "enum": [ - "extended" - ] - } - ] - }, - "ErrorRepr": { - "oneOf": [ - { - "description": "The error reason", - "type": "string", - "enum": [ - "reason" - ] - } - ] - }, - "ErrorsConfiguration": { - "type": "object", - "properties": { - "preview_extended_error_metrics": { - "description": "#/definitions/ExtendedErrorMetricsMode", - "$ref": "#/definitions/ExtendedErrorMetricsMode" - }, - "subgraph": { - "description": "#/definitions/SubgraphErrorConfig", - "$ref": "#/definitions/SubgraphErrorConfig" - } - }, - "additionalProperties": false - }, - "EventLevelConfig": { - "type": "string", - "enum": [ - "info", - "warn", - "error", - "off" - ] - }, - "EventOn": { - "description": "When to trigger the event.", - "oneOf": [ - { - "description": "Log the event on request", - "type": "string", - "enum": [ - "request" - ] - }, - { - "description": "Log the event on response", - "type": "string", - "enum": [ - "response" - ] - }, - { - "description": "Log the event on every chunks in the response", - "type": "string", - "enum": [ - "event_response" - ] - }, - { - "description": "Log the event on error", - "type": "string", - "enum": [ - "error" - ] - } - ] - }, - "Event_for_ConnectorAttributes_and_ConnectorSelector": { - "description": "An event that can be logged as part of a trace. 
The event has an implicit `type` attribute that matches the name of the event in the yaml and a message that can be used to provide additional information.", - "type": "object", - "required": [ - "level", - "message", - "on" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector" - }, - "condition": { - "description": "#/definitions/Condition_for_ConnectorSelector", - "$ref": "#/definitions/Condition_for_ConnectorSelector" - }, - "level": { - "description": "#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - }, - "message": { - "description": "The event message.", - "type": "string" - }, - "on": { - "description": "#/definitions/EventOn", - "$ref": "#/definitions/EventOn" - } - } - }, - "Event_for_RouterAttributes_and_RouterSelector": { - "description": "An event that can be logged as part of a trace. The event has an implicit `type` attribute that matches the name of the event in the yaml and a message that can be used to provide additional information.", - "type": "object", - "required": [ - "level", - "message", - "on" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector" - }, - "condition": { - "description": "#/definitions/Condition_for_RouterSelector", - "$ref": "#/definitions/Condition_for_RouterSelector" - }, - "level": { - "description": "#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - }, - "message": { - "description": "The event message.", - "type": "string" - }, - "on": { - "description": "#/definitions/EventOn", - "$ref": "#/definitions/EventOn" - } - } - }, - "Event_for_SubgraphAttributes_and_SubgraphSelector": { - "description": "An event that can be logged as part of a trace. 
The event has an implicit `type` attribute that matches the name of the event in the yaml and a message that can be used to provide additional information.", - "type": "object", - "required": [ - "level", - "message", - "on" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - }, - "condition": { - "description": "#/definitions/Condition_for_SubgraphSelector", - "$ref": "#/definitions/Condition_for_SubgraphSelector" - }, - "level": { - "description": "#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - }, - "message": { - "description": "The event message.", - "type": "string" - }, - "on": { - "description": "#/definitions/EventOn", - "$ref": "#/definitions/EventOn" - } - } - }, - "Event_for_SupergraphAttributes_and_SupergraphSelector": { - "description": "An event that can be logged as part of a trace. The event has an implicit `type` attribute that matches the name of the event in the yaml and a message that can be used to provide additional information.", - "type": "object", - "required": [ - "level", - "message", - "on" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector" - }, - "condition": { - "description": "#/definitions/Condition_for_SupergraphSelector", - "$ref": "#/definitions/Condition_for_SupergraphSelector" - }, - "level": { - "description": "#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - }, - "message": { - "description": "The event message.", - "type": "string" - }, - "on": { - "description": "#/definitions/EventOn", - "$ref": "#/definitions/EventOn" - } - } - }, - "Event_for_SupergraphSelector": { - "oneOf": [ - { - "description": "For every supergraph response payload (including subscription events and defer events)", - "type": "string", - "enum": [ - "event_duration" - ] - }, - { - "description": "For every supergraph response payload (including subscription events and defer events)", - "type": "string", - "enum": [ - "event_unit" - ] - }, - { - "description": "For every supergraph response payload (including subscription events and defer events)", - "type": "object", - "required": [ - "event_custom" - ], - "properties": { - "event_custom": { - "description": "#/definitions/SupergraphSelector", - "$ref": "#/definitions/SupergraphSelector" - } - }, - "additionalProperties": false - } - ] - }, - "Events": { - "description": "Events are", - "type": "object", - "properties": { - "connector": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::events::ConnectorEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event", - "$ref": 
"#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::events::ConnectorEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event" - }, - "router": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::events::RouterEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::events::RouterEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event" - }, - "subgraph": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::events::SubgraphEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::events::SubgraphEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event" - }, - "supergraph": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::events::SupergraphEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::events::SupergraphEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event" - } - }, - "additionalProperties": false - }, - "ExecutionRequestConf": { - "description": "What information is passed to a router request/response stage", - "type": "object", - "properties": { - "body": { - "description": "Send the body", - "default": false, - "type": "boolean" - }, - "context": { - "description": "#/definitions/ContextConf", - "$ref": "#/definitions/ContextConf" - }, - "headers": { - "description": "Send the headers", - "default": false, - "type": "boolean" - }, - "method": { - "description": "Send the method", - "default": false, - "type": "boolean" - }, - "query_plan": { - "description": "Send the query plan", - "default": false, - "type": "boolean" - }, - "sdl": { - "description": "Send the SDL", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "ExecutionResponseConf": { - "description": "What information is passed to a router request/response stage", - "type": "object", - "properties": { - "body": { - "description": "Send the body", - "default": false, - "type": "boolean" - }, - "context": { - "description": "#/definitions/ContextConf", - "$ref": "#/definitions/ContextConf" - }, - "headers": { - "description": "Send the headers", - "default": false, - "type": "boolean" - }, - "sdl": { - "description": "Send the SDL", - "default": false, - "type": "boolean" - }, - "status_code": { - "description": "Send the HTTP status", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "ExecutionStage": { - "type": "object", - "properties": { - "request": { - "description": "#/definitions/ExecutionRequestConf", - "$ref": "#/definitions/ExecutionRequestConf" - }, - "response": { - "description": "#/definitions/ExecutionResponseConf", - "$ref": "#/definitions/ExecutionResponseConf" - } - } - }, - "Exporters": { - "description": "Exporter configuration", - "type": "object", - "properties": { - "logging": { - "description": "#/definitions/Logging", - "$ref": "#/definitions/Logging" - }, - "metrics": { - "description": "#/definitions/Metrics2", - "$ref": "#/definitions/Metrics2" - }, - "tracing": { - 
"description": "#/definitions/Tracing", - "$ref": "#/definitions/Tracing" - } - }, - "additionalProperties": false - }, - "ExposeQueryPlanConfig": { - "description": "Expose query plan", - "type": "boolean" - }, - "ExposeTraceId": { - "type": "object", - "properties": { - "enabled": { - "description": "Expose the trace_id in response headers", - "default": false, - "type": "boolean" - }, - "format": { - "description": "#/definitions/TraceIdFormat", - "$ref": "#/definitions/TraceIdFormat" - }, - "header_name": { - "description": "Choose the header name to expose trace_id (default: apollo-trace-id)", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "ExtendedErrorMetricsMode": { - "description": "Extended Open Telemetry error metrics mode", - "oneOf": [ - { - "description": "Do not send extended OTLP error metrics", - "type": "string", - "enum": [ - "disabled" - ] - }, - { - "description": "Send extended OTLP error metrics to Apollo Studio with additional dimensions [`extensions.service`, `extensions.code`]. If enabled, it's also recommended to enable `redaction_policy: extended` on subgraphs to send the `extensions.code` for subgraph errors.", - "type": "string", - "enum": [ - "enabled" - ] - } - ] - }, - "FieldName": { - "oneOf": [ - { - "description": "The GraphQL field name", - "type": "string", - "enum": [ - "string" - ] - } - ] - }, - "FieldType": { - "oneOf": [ - { - "description": "The GraphQL field name", - "type": "string", - "enum": [ - "name" - ] - }, - { - "description": "The GraphQL field type - `bool` - `number` - `scalar` - `object` - `list`", - "type": "string", - "enum": [ - "type" - ] - } - ] - }, - "FileUploadProtocols": { - "description": "Configuration for the various protocols supported by the file upload plugin", - "type": "object", - "required": [ - "multipart" - ], - "properties": { - "multipart": { - "description": "#/definitions/MultipartRequest", - "$ref": "#/definitions/MultipartRequest" - } - }, - "additionalProperties": false - }, - "FileUploadsConfig": { - "description": "Configuration for File Uploads plugin", - "type": "object", - "required": [ - "enabled", - "protocols" - ], - "properties": { - "enabled": { - "description": "Whether the file upload plugin should be enabled (default: false)", - "type": "boolean" - }, - "protocols": { - "description": "#/definitions/FileUploadProtocols", - "$ref": "#/definitions/FileUploadProtocols" - } - }, - "additionalProperties": false - }, - "ForbidMutationsConfig": { - "description": "Forbid mutations configuration", - "type": "boolean" - }, - "ForwardHeaders": { - "description": "Forward headers", - "oneOf": [ - { - "description": "Don't send any headers", - "type": "string", - "enum": [ - "none" - ] - }, - { - "description": "Send all headers", - "type": "string", - "enum": [ - "all" - ] - }, - { - "description": "Send only the headers specified", - "type": "object", - "required": [ - "only" - ], - "properties": { - "only": { - "description": "Send only the headers specified", - "type": "array", - "items": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - { - "description": "Send all headers except those specified", - "type": "object", - "required": [ - "except" - ], - "properties": { - "except": { - "description": "Send all headers except those specified", - "type": "array", - "items": { - "type": "string" - } - } - }, - "additionalProperties": false - } - ] - }, - "ForwardValues": { - "description": "Forward GraphQL variables", - "oneOf": [ - { - 
"description": "Dont send any variables", - "type": "string", - "enum": [ - "none" - ] - }, - { - "description": "Send all variables", - "type": "string", - "enum": [ - "all" - ] - }, - { - "description": "Send only the variables specified", - "type": "object", - "required": [ - "only" - ], - "properties": { - "only": { - "description": "Send only the variables specified", - "type": "array", - "items": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - { - "description": "Send all variables except those specified", - "type": "object", - "required": [ - "except" - ], - "properties": { - "except": { - "description": "Send all variables except those specified", - "type": "array", - "items": { - "type": "string" - } - } - }, - "additionalProperties": false - } - ] - }, - "GraphQLAttributes": { - "type": "object", - "properties": { - "graphql.field.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.field.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.list.length": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.operation.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.type.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": false - }, - "GraphQLInstrumentsConfig": { - "type": "object", - "properties": { - "field.execution": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector" - }, - "list.length": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector" - } - }, - "additionalProperties": false - }, - "GraphQLSelector": { - "anyOf": [ - { - "description": "If the field is a list, the length of the list", - "type": "object", - "required": [ - "list_length" - ], - "properties": { - "list_length": { - "description": "#/definitions/ListLength", - "$ref": "#/definitions/ListLength" - } - }, - "additionalProperties": false - }, - { - "description": "The GraphQL field name", - "type": "object", - "required": [ - "field_name" - ], - "properties": { - "field_name": { - "description": "#/definitions/FieldName", - "$ref": "#/definitions/FieldName" - } - }, - "additionalProperties": false - }, - { - "description": "The GraphQL field type", - "type": "object", - "required": [ - "field_type" - ], - "properties": { - "field_type": { - 
"description": "#/definitions/FieldType", - "$ref": "#/definitions/FieldType" - } - }, - "additionalProperties": false - }, - { - "description": "The GraphQL type name", - "type": "object", - "required": [ - "type_name" - ], - "properties": { - "type_name": { - "description": "#/definitions/TypeName", - "$ref": "#/definitions/TypeName" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "operation_name" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "operation_name": { - "description": "#/definitions/OperationName", - "$ref": "#/definitions/OperationName" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "static" - ], - "properties": { - "static": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - } - }, - "additionalProperties": false - } - ] - }, - "GraphQLValue": { - "anyOf": [ - { - "description": "#/definitions/StandardUnit", - "$ref": "#/definitions/StandardUnit" - }, - { - "description": "#/definitions/GraphQLSelector", - "$ref": "#/definitions/GraphQLSelector" - } - ] - }, - "GrpcExporter": { - "type": "object", - "properties": { - "ca": { - "description": "The optional certificate authority (CA) certificate to be used in TLS configuration.", - "default": null, - "type": "string", - "nullable": true - }, - "cert": { - "description": "The optional cert for tls config", - "default": null, - "type": "string", - "nullable": true - }, - "domain_name": { - "description": "The optional domain name for tls config. Note that domain name is will be defaulted to match the endpoint is not explicitly set.", - "default": null, - "type": "string", - "nullable": true - }, - "key": { - "description": "The optional private key file for TLS configuration.", - "default": null, - "type": "string", - "nullable": true - }, - "metadata": { - "description": "gRPC metadata", - "default": {}, - "type": "object", - "additionalProperties": true - } - }, - "additionalProperties": false - }, - "Header": { - "description": "Insert a header", - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "description": "The name of the header", - "type": "string" - }, - "value": { - "description": "The value for the header", - "type": "string" - } - }, - "additionalProperties": false - }, - "HeadersLocation": { - "type": "object", - "required": [ - "request" - ], - "properties": { - "request": { - "description": "Propagate/Insert/Remove headers from request", - "type": "array", - "items": { - "description": "#/definitions/Operation", - "$ref": "#/definitions/Operation" - } - } - }, - "additionalProperties": false - }, - "HeartbeatInterval": { - "anyOf": [ - { - "description": "#/definitions/Disabled", - "$ref": "#/definitions/Disabled" - }, - { - "description": "#/definitions/Enabled", - "$ref": "#/definitions/Enabled" - }, - { - "description": "enable with custom interval, e.g. 
'100ms', '10s' or '1m'", - "type": "string" - } - ] - }, - "Homepage": { - "description": "Configuration options pertaining to the home page.", - "type": "object", - "properties": { - "enabled": { - "description": "Set to false to disable the homepage", - "default": true, - "type": "boolean" - }, - "graph_ref": { - "description": "Graph reference This will allow you to redirect from the Apollo Router landing page back to Apollo Studio Explorer", - "default": null, - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "Http2Config": { - "oneOf": [ - { - "description": "Enable HTTP2 for subgraphs", - "type": "string", - "enum": [ - "enable" - ] - }, - { - "description": "Disable HTTP2 for subgraphs", - "type": "string", - "enum": [ - "disable" - ] - }, - { - "description": "Only HTTP2 is active", - "type": "string", - "enum": [ - "http2only" - ] - } - ] - }, - "HttpExporter": { - "type": "object", - "properties": { - "headers": { - "description": "Headers to send on report requests", - "default": {}, - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "InMemoryCache": { - "description": "In memory cache configuration", - "type": "object", - "required": [ - "limit" - ], - "properties": { - "limit": { - "description": "Number of entries in the Least Recently Used cache", - "type": "integer", - "format": "uint", - "minimum": 1.0 - } - }, - "additionalProperties": false - }, - "Insert": { - "description": "Insert header", - "anyOf": [ - { - "description": "#/definitions/InsertStatic", - "$ref": "#/definitions/InsertStatic" - }, - { - "description": "#/definitions/InsertFromContext", - "$ref": "#/definitions/InsertFromContext" - }, - { - "description": "#/definitions/InsertFromBody", - "$ref": "#/definitions/InsertFromBody" - } - ] - }, - "InsertFromBody": { - "description": "Insert header with a value coming from body", - "type": "object", - "required": [ - "name", - "path" - ], - "properties": { - "default": { - "description": "The default if the path in the body did not resolve to an element", - "type": "string", - "nullable": true - }, - "name": { - "description": "The target header name", - "type": "string" - }, - "path": { - "description": "The path in the request body", - "type": "string" - } - }, - "additionalProperties": false - }, - "InsertFromContext": { - "description": "Insert header with a value coming from context key", - "type": "object", - "required": [ - "from_context", - "name" - ], - "properties": { - "from_context": { - "description": "Specify context key to fetch value", - "type": "string" - }, - "name": { - "description": "Specify header name", - "type": "string" - } - }, - "additionalProperties": false - }, - "InsertStatic": { - "description": "Insert static header", - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "description": "The name of the header", - "type": "string" - }, - "value": { - "description": "The value for the header", - "type": "string" - } - }, - "additionalProperties": false - }, - "InstrumentType": { - "oneOf": [ - { - "description": "A monotonic counter https://opentelemetry.io/docs/specs/otel/metrics/data-model/#sums", - "type": "string", - "enum": [ - "counter" - ] - }, - { - "description": "A histogram https://opentelemetry.io/docs/specs/otel/metrics/data-model/#histogram", - "type": "string", - "enum": [ - "histogram" - ] - } - ] - }, - 
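The `InsertStatic`, `InsertFromContext`, and `InsertFromBody` definitions above are the three `insert` forms accepted under the router's `headers` plugin, where each entry in a `HeadersLocation` `request` array is one `Operation`. As a rough sketch only (the header names, context key, and body path below are invented for illustration, not taken from this repository), a `router.yaml` fragment using all three forms might look like:

```yaml
headers:
  all:
    request:
      # InsertStatic: a fixed header name/value pair
      - insert:
          name: "x-deployment"        # hypothetical header name
          value: "storefront"
      # InsertFromContext: copy a value out of the request context
      - insert:
          name: "x-client-id"         # hypothetical header name
          from_context: "client_id"   # hypothetical context key
      # InsertFromBody: extract a value from the request body, with a fallback
      - insert:
          name: "x-operation"         # hypothetical header name
          path: ".operationName"      # hypothetical body path
          default: "anonymous"
```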
"Instrument_for_CacheAttributes_and_SubgraphSelector_and_SubgraphValue": { - "type": "object", - "required": [ - "description", - "type", - "unit", - "value" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - }, - "condition": { - "description": "#/definitions/Condition_for_SubgraphSelector", - "$ref": "#/definitions/Condition_for_SubgraphSelector" - }, - "description": { - "description": "The description of the instrument.", - "type": "string" - }, - "type": { - "description": "#/definitions/InstrumentType", - "$ref": "#/definitions/InstrumentType" - }, - "unit": { - "description": "The units of the instrument, e.g. \"ms\", \"bytes\", \"requests\".", - "type": "string" - }, - "value": { - "description": "#/definitions/SubgraphValue", - "$ref": "#/definitions/SubgraphValue" - } - }, - "additionalProperties": false - }, - "Instrument_for_ConnectorAttributes_and_ConnectorSelector_and_ConnectorValue": { - "type": "object", - "required": [ - "description", - "type", - "unit", - "value" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector" - }, - "condition": { - "description": "#/definitions/Condition_for_ConnectorSelector", - "$ref": "#/definitions/Condition_for_ConnectorSelector" - }, - "description": { - "description": "The description of the instrument.", - "type": "string" - }, - "type": { - "description": "#/definitions/InstrumentType", - "$ref": "#/definitions/InstrumentType" - }, - "unit": { - "description": "The units of the instrument, e.g. 
\"ms\", \"bytes\", \"requests\".", - "type": "string" - }, - "value": { - "description": "#/definitions/ConnectorValue", - "$ref": "#/definitions/ConnectorValue" - } - }, - "additionalProperties": false - }, - "Instrument_for_GraphQLAttributes_and_GraphQLSelector_and_GraphQLValue": { - "type": "object", - "required": [ - "description", - "type", - "unit", - "value" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector" - }, - "condition": { - "description": "#/definitions/Condition_for_GraphQLSelector", - "$ref": "#/definitions/Condition_for_GraphQLSelector" - }, - "description": { - "description": "The description of the instrument.", - "type": "string" - }, - "type": { - "description": "#/definitions/InstrumentType", - "$ref": "#/definitions/InstrumentType" - }, - "unit": { - "description": "The units of the instrument, e.g. \"ms\", \"bytes\", \"requests\".", - "type": "string" - }, - "value": { - "description": "#/definitions/GraphQLValue", - "$ref": "#/definitions/GraphQLValue" - } - }, - "additionalProperties": false - }, - "Instrument_for_RouterAttributes_and_RouterSelector_and_RouterValue": { - "type": "object", - "required": [ - "description", - "type", - "unit", - "value" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector" - }, - "condition": { - "description": "#/definitions/Condition_for_RouterSelector", - "$ref": "#/definitions/Condition_for_RouterSelector" - }, - "description": { - "description": "The description of the instrument.", - "type": "string" - }, - "type": { - "description": "#/definitions/InstrumentType", - "$ref": "#/definitions/InstrumentType" - }, - "unit": { - "description": "The units of the instrument, e.g. 
\"ms\", \"bytes\", \"requests\".", - "type": "string" - }, - "value": { - "description": "#/definitions/RouterValue", - "$ref": "#/definitions/RouterValue" - } - }, - "additionalProperties": false - }, - "Instrument_for_SubgraphAttributes_and_SubgraphSelector_and_SubgraphValue": { - "type": "object", - "required": [ - "description", - "type", - "unit", - "value" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - }, - "condition": { - "description": "#/definitions/Condition_for_SubgraphSelector", - "$ref": "#/definitions/Condition_for_SubgraphSelector" - }, - "description": { - "description": "The description of the instrument.", - "type": "string" - }, - "type": { - "description": "#/definitions/InstrumentType", - "$ref": "#/definitions/InstrumentType" - }, - "unit": { - "description": "The units of the instrument, e.g. \"ms\", \"bytes\", \"requests\".", - "type": "string" - }, - "value": { - "description": "#/definitions/SubgraphValue", - "$ref": "#/definitions/SubgraphValue" - } - }, - "additionalProperties": false - }, - "Instrument_for_SupergraphAttributes_and_SupergraphSelector_and_SupergraphValue": { - "type": "object", - "required": [ - "description", - "type", - "unit", - "value" - ], - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector" - }, - "condition": { - "description": "#/definitions/Condition_for_SupergraphSelector", - "$ref": "#/definitions/Condition_for_SupergraphSelector" - }, - "description": { - "description": "The description of the instrument.", - "type": "string" - }, - "type": { - "description": "#/definitions/InstrumentType", - "$ref": "#/definitions/InstrumentType" - }, - "unit": { - "description": "The units of the instrument, e.g. 
\"ms\", \"bytes\", \"requests\".", - "type": "string" - }, - "value": { - "description": "#/definitions/SupergraphValue", - "$ref": "#/definitions/SupergraphValue" - } - }, - "additionalProperties": false - }, - "Instrumentation": { - "description": "Instrumentation configuration", - "type": "object", - "properties": { - "events": { - "description": "#/definitions/Events", - "$ref": "#/definitions/Events" - }, - "instruments": { - "description": "#/definitions/InstrumentsConfig", - "$ref": "#/definitions/InstrumentsConfig" - }, - "spans": { - "description": "#/definitions/Spans", - "$ref": "#/definitions/Spans" - } - }, - "additionalProperties": false - }, - "InstrumentsConfig": { - "type": "object", - "properties": { - "cache": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::CacheInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::CacheInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument" - }, - "connector": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::instruments::ConnectorInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::instruments::ConnectorInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument" - }, - "default_requirement_level": { - "description": "#/definitions/DefaultAttributeRequirementLevel", - "$ref": "#/definitions/DefaultAttributeRequirementLevel" - }, - "graphql": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::GraphQLInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::GraphQLInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument" - }, - "router": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::instruments::RouterInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::instruments::RouterInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument" - }, - "subgraph": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::instruments::SubgraphInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::instruments::SubgraphInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument" - }, - "supergraph": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::instruments::SupergraphInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument", - "$ref": 
"#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::instruments::SupergraphInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument" - } - }, - "additionalProperties": false - }, - "InvalidationEndpointConfig": { - "type": "object", - "required": [ - "listen", - "path" - ], - "properties": { - "concurrent_requests": { - "description": "Number of concurrent invalidation requests", - "default": 10, - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "listen": { - "description": "#/definitions/ListenAddr", - "$ref": "#/definitions/ListenAddr" - }, - "path": { - "description": "Specify on which path you want to listen for invalidation endpoint.", - "type": "string" - }, - "scan_count": { - "description": "Number of keys to return at once from a redis SCAN command", - "default": 1000, - "type": "integer", - "format": "uint32", - "minimum": 0.0 - } - }, - "additionalProperties": false - }, - "JWTConf": { - "type": "object", - "required": [ - "jwks" - ], - "properties": { - "header_name": { - "description": "HTTP header expected to contain JWT", - "default": "authorization", - "type": "string" - }, - "header_value_prefix": { - "description": "Header value prefix", - "default": "Bearer", - "type": "string" - }, - "ignore_other_prefixes": { - "description": "Whether to ignore any mismatched prefixes", - "default": false, - "type": "boolean" - }, - "jwks": { - "description": "List of JWKS used to verify tokens", - "type": "array", - "items": { - "description": "#/definitions/JwksConf", - "$ref": "#/definitions/JwksConf" - } - }, - "on_error": { - "description": "#/definitions/OnError", - "$ref": "#/definitions/OnError" - }, - "sources": { - "description": "Alternative sources to extract the JWT", - "type": "array", - "items": { - "description": "#/definitions/Source", - "$ref": "#/definitions/Source" - } - } - }, - "additionalProperties": false - }, - "JwksConf": { - "type": "object", - "required": [ - "url" - ], - "properties": { - "algorithms": { - "description": "List of accepted algorithms. 
Possible values are `HS256`, `HS384`, `HS512`, `ES256`, `ES384`, `RS256`, `RS384`, `RS512`, `PS256`, `PS384`, `PS512`, `EdDSA`", - "default": null, - "type": "array", - "items": { - "type": "string" - }, - "nullable": true - }, - "headers": { - "description": "List of headers to add to the JWKS request", - "type": "array", - "items": { - "description": "#/definitions/Header", - "$ref": "#/definitions/Header" - } - }, - "issuers": { - "description": "Expected issuers for tokens verified by that JWKS", - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true, - "nullable": true - }, - "poll_interval": { - "description": "Polling interval for each JWKS endpoint in human-readable format; defaults to 60s", - "default": { - "secs": 60, - "nanos": 0 - }, - "type": "string" - }, - "url": { - "description": "Retrieve the JWK Set", - "type": "string" - } - }, - "additionalProperties": false - }, - "LicenseEnforcementConfig": { - "type": "object" - }, - "ListLength": { - "oneOf": [ - { - "description": "The length of the list", - "type": "string", - "enum": [ - "value" - ] - } - ] - }, - "ListenAddr": { - "description": "Listening address.", - "anyOf": [ - { - "description": "Socket address.", - "type": "string" - }, - { - "description": "Unix socket.", - "type": "string" - } - ] - }, - "Logging": { - "description": "Logging configuration.", - "type": "object", - "properties": { - "common": { - "description": "#/definitions/LoggingCommon", - "$ref": "#/definitions/LoggingCommon" - }, - "stdout": { - "description": "#/definitions/StdOut", - "$ref": "#/definitions/StdOut" - } - }, - "additionalProperties": false - }, - "LoggingCommon": { - "type": "object", - "properties": { - "resource": { - "description": "The Open Telemetry resource", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - } - }, - "service_name": { - "description": "Set a service.name resource in your metrics", - "default": null, - "type": "string", - "nullable": true - }, - "service_namespace": { - "description": "Set a service.namespace attribute in your metrics", - "default": null, - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "MappingProblems": { - "type": "string", - "enum": [ - "problems", - "count" - ] - }, - "MetricAggregation": { - "oneOf": [ - { - "description": "An aggregation that summarizes a set of measurements as an histogram with explicitly defined buckets.", - "type": "object", - "required": [ - "histogram" - ], - "properties": { - "histogram": { - "type": "object", - "required": [ - "buckets" - ], - "properties": { - "buckets": { - "type": "array", - "items": { - "type": "number", - "format": "double" - } - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - { - "description": "Simply drop the metrics matching this view", - "type": "string", - "enum": [ - "drop" - ] - } - ] - }, - "MetricView": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "aggregation": { - "description": "#/definitions/MetricAggregation", - "$ref": "#/definitions/MetricAggregation", - "nullable": true - }, - "allowed_attribute_keys": { - "description": "An allow-list of attribute keys that will be preserved for the instrument.\n\nAny attribute recorded for the instrument with a key not in this set will be dropped. 
If the set is empty, all attributes will be dropped, if `None` all attributes will be kept.", - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true, - "nullable": true - }, - "description": { - "description": "New description to set to the instrument", - "type": "string", - "nullable": true - }, - "name": { - "description": "The instrument name you're targeting", - "type": "string" - }, - "unit": { - "description": "New unit to set to the instrument", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "Metrics": { - "description": "Per subgraph configuration for entity caching", - "type": "object", - "properties": { - "enabled": { - "description": "enables metrics evaluating the benefits of entity caching", - "default": false, - "type": "boolean" - }, - "separate_per_type": { - "description": "Adds the entity type name to attributes. This can greatly increase the cardinality", - "default": false, - "type": "boolean" - }, - "ttl": { - "description": "#/definitions/Ttl", - "$ref": "#/definitions/Ttl", - "nullable": true - } - }, - "additionalProperties": false - }, - "Metrics2": { - "description": "Metrics configuration", - "type": "object", - "properties": { - "common": { - "description": "#/definitions/MetricsCommon", - "$ref": "#/definitions/MetricsCommon" - }, - "otlp": { - "description": "#/definitions/Config13", - "$ref": "#/definitions/Config13" - }, - "prometheus": { - "description": "#/definitions/Config14", - "$ref": "#/definitions/Config14" - } - }, - "additionalProperties": false - }, - "MetricsCommon": { - "type": "object", - "properties": { - "buckets": { - "description": "Custom buckets for all histograms", - "default": [ - 0.001, - 0.005, - 0.015, - 0.05, - 0.1, - 0.2, - 0.3, - 0.4, - 0.5, - 1.0, - 5.0, - 10.0 - ], - "type": "array", - "items": { - "type": "number", - "format": "double" - } - }, - "resource": { - "description": "The Open Telemetry resource", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - } - }, - "service_name": { - "description": "Set a service.name resource in your metrics", - "default": null, - "type": "string", - "nullable": true - }, - "service_namespace": { - "description": "Set a service.namespace attribute in your metrics", - "default": null, - "type": "string", - "nullable": true - }, - "views": { - "description": "Views applied on metrics", - "type": "array", - "items": { - "description": "#/definitions/MetricView", - "$ref": "#/definitions/MetricView" - } - } - }, - "additionalProperties": false - }, - "Mode": { - "type": "string", - "enum": [ - "measure", - "enforce" - ] - }, - "MultipartRequest": { - "description": "Configuration for a multipart request for file uploads.\n\nThis protocol conforms to [jaydenseric's multipart spec](https://github.com/jaydenseric/graphql-multipart-request-spec)", - "type": "object", - "properties": { - "enabled": { - "description": "Whether to enable the multipart protocol for file uploads (default: true)", - "default": true, - "type": "boolean" - }, - "limits": { - "description": "#/definitions/MultipartRequestLimits", - "$ref": "#/definitions/MultipartRequestLimits" - }, - "mode": { - "description": "#/definitions/MultipartRequestMode", - "$ref": "#/definitions/MultipartRequestMode" - } - }, - "additionalProperties": false - }, - "MultipartRequestLimits": { - "description": "Request limits for a multipart request", - "type": "object", - 
"required": [ - "max_file_size", - "max_files" - ], - "properties": { - "max_file_size": { - "description": "The maximum size of each file, in bytes (default: 5MB)", - "type": "string" - }, - "max_files": { - "description": "The maximum amount of files allowed for a single query (default: 4)", - "type": "integer", - "format": "uint", - "minimum": 0.0 - } - }, - "additionalProperties": false - }, - "MultipartRequestMode": { - "description": "Supported mode for a multipart request", - "oneOf": [ - { - "description": "The multipart request will not be loaded into memory and instead will be streamed directly to the subgraph in the order received. This has some limitations, mainly that the query _must_ be able to be streamed directly to the subgraph without buffering.\n\nIn practice, this means that certain queries will fail due to ordering of the files.", - "type": "string", - "enum": [ - "stream" - ] - } - ] - }, - "NewContextConf": { - "description": "Configures the context", - "oneOf": [ - { - "description": "Send all context keys to coprocessor", - "type": "string", - "enum": [ - "all" - ] - }, - { - "description": "Send all context keys using deprecated names (from router 1.x) to coprocessor", - "type": "string", - "enum": [ - "deprecated" - ] - }, - { - "description": "Only send the list of context keys to coprocessor", - "type": "object", - "required": [ - "selective" - ], - "properties": { - "selective": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "additionalProperties": false - } - ] - }, - "OnError": { - "type": "string", - "enum": [ - "Continue", - "Error" - ] - }, - "Operation": { - "oneOf": [ - { - "type": "object", - "required": [ - "insert" - ], - "properties": { - "insert": { - "description": "#/definitions/Insert", - "$ref": "#/definitions/Insert" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "remove" - ], - "properties": { - "remove": { - "description": "#/definitions/Remove", - "$ref": "#/definitions/Remove" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "propagate" - ], - "properties": { - "propagate": { - "description": "#/definitions/Propagate", - "$ref": "#/definitions/Propagate" - } - }, - "additionalProperties": false - } - ] - }, - "OperationKind": { - "oneOf": [ - { - "description": "The raw operation kind.", - "type": "string", - "enum": [ - "string" - ] - } - ] - }, - "OperationName": { - "oneOf": [ - { - "description": "The raw operation name.", - "type": "string", - "enum": [ - "string" - ] - }, - { - "description": "A hash of the operation name.", - "type": "string", - "enum": [ - "hash" - ] - } - ] - }, - "PersistedQueries": { - "description": "Persisted Queries (PQ) configuration", - "type": "object", - "properties": { - "enabled": { - "description": "Activates Persisted Queries (disabled by default)", - "default": false, - "type": "boolean" - }, - "experimental_prewarm_query_plan_cache": { - "description": "#/definitions/PersistedQueriesPrewarmQueryPlanCache", - "$ref": "#/definitions/PersistedQueriesPrewarmQueryPlanCache" - }, - "hot_reload": { - "description": "Enables hot reloading of the local persisted query manifests", - "default": false, - "type": "boolean" - }, - "local_manifests": { - "description": "Enables using a local copy of the persisted query manifest to safelist operations", - "default": null, - "type": "array", - "items": { - "type": "string" - }, - "nullable": true - }, - "log_unknown": { - "description": 
"Enabling this field configures the router to log any freeform GraphQL request that is not in the persisted query list", - "default": false, - "type": "boolean" - }, - "safelist": { - "description": "#/definitions/PersistedQueriesSafelist", - "$ref": "#/definitions/PersistedQueriesSafelist" - } - }, - "additionalProperties": false - }, - "PersistedQueriesPrewarmQueryPlanCache": { - "description": "Persisted Queries (PQ) query plan cache prewarm configuration", - "type": "object", - "properties": { - "on_reload": { - "description": "Enabling this field uses the persisted query list to pre-warm the query planner cache on schema and config changes (enabled by default)", - "default": true, - "type": "boolean" - }, - "on_startup": { - "description": "Enabling this field uses the persisted query list to pre-warm the query planner cache on startup (disabled by default)", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "PersistedQueriesSafelist": { - "description": "Persisted Queries (PQ) Safelisting configuration", - "type": "object", - "properties": { - "enabled": { - "description": "Enables using the persisted query list as a safelist (disabled by default)", - "default": false, - "type": "boolean" - }, - "require_id": { - "description": "Enabling this field configures the router to reject any request that does not include the persisted query ID", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "Plugins": { - "properties": { - "experimental.broken": { - "description": "#/definitions/Config3", - "$ref": "#/definitions/Config3" - }, - "experimental.expose_query_plan": { - "description": "#/definitions/ExposeQueryPlanConfig", - "$ref": "#/definitions/ExposeQueryPlanConfig" - }, - "experimental.record": { - "description": "#/definitions/RecordConfig", - "$ref": "#/definitions/RecordConfig" - }, - "experimental.restricted": { - "description": "#/definitions/Config4", - "$ref": "#/definitions/Config4" - } - }, - "additionalProperties": false - }, - "Propagate": { - "description": "Propagate header", - "anyOf": [ - { - "description": "Propagate header given a header name", - "type": "object", - "required": [ - "named" - ], - "properties": { - "default": { - "description": "Default value for the header.", - "type": "string", - "nullable": true - }, - "named": { - "description": "The source header name", - "type": "string" - }, - "rename": { - "description": "An optional target header name", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - { - "description": "Propagate header given a regex to match header name", - "type": "object", - "required": [ - "matching" - ], - "properties": { - "matching": { - "description": "The regex on header name", - "type": "string" - } - }, - "additionalProperties": false - } - ] - }, - "Propagation": { - "description": "Configure propagation of traces. 
In general you won't have to do this as these are automatically configured along with any exporter you configure.", - "type": "object", - "properties": { - "aws_xray": { - "description": "Propagate AWS X-Ray", - "default": false, - "type": "boolean" - }, - "baggage": { - "description": "Propagate baggage https://www.w3.org/TR/baggage/", - "default": false, - "type": "boolean" - }, - "datadog": { - "description": "Propagate Datadog", - "default": false, - "type": "boolean" - }, - "jaeger": { - "description": "Propagate Jaeger", - "default": false, - "type": "boolean" - }, - "request": { - "description": "#/definitions/RequestPropagation", - "$ref": "#/definitions/RequestPropagation" - }, - "trace_context": { - "description": "Propagate trace context https://www.w3.org/TR/trace-context/", - "default": false, - "type": "boolean" - }, - "zipkin": { - "description": "Propagate Zipkin", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "Protocol": { - "type": "string", - "enum": [ - "grpc", - "http" - ] - }, - "Query": { - "oneOf": [ - { - "description": "The raw query kind.", - "type": "string", - "enum": [ - "string" - ] - }, - { - "description": "The query aliases.", - "type": "string", - "enum": [ - "aliases" - ] - }, - { - "description": "The query depth.", - "type": "string", - "enum": [ - "depth" - ] - }, - { - "description": "The query height.", - "type": "string", - "enum": [ - "height" - ] - }, - { - "description": "The query root fields.", - "type": "string", - "enum": [ - "root_fields" - ] - } - ] - }, - "QueryPlanCache": { - "description": "Cache configuration", - "type": "object", - "properties": { - "in_memory": { - "description": "#/definitions/InMemoryCache", - "$ref": "#/definitions/InMemoryCache" - }, - "redis": { - "description": "#/definitions/QueryPlanRedisCache", - "$ref": "#/definitions/QueryPlanRedisCache", - "nullable": true - } - }, - "additionalProperties": false - }, - "QueryPlanRedisCache": { - "description": "Redis cache configuration", - "type": "object", - "required": [ - "urls" - ], - "properties": { - "namespace": { - "description": "namespace used to prefix Redis keys", - "type": "string", - "nullable": true - }, - "password": { - "description": "Redis password if not provided in the URLs. This field takes precedence over the password in the URL", - "type": "string", - "nullable": true - }, - "pool_size": { - "description": "The size of the Redis connection pool", - "default": 1, - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "required_to_start": { - "description": "Prevents the router from starting if it cannot connect to Redis", - "default": false, - "type": "boolean" - }, - "reset_ttl": { - "description": "When a TTL is set on a key, reset it when reading the data from that key", - "default": true, - "type": "boolean" - }, - "timeout": { - "description": "Redis request timeout (default: 2ms)", - "default": null, - "type": "string", - "nullable": true - }, - "tls": { - "description": "#/definitions/TlsClient", - "$ref": "#/definitions/TlsClient", - "nullable": true - }, - "ttl": { - "description": "TTL for entries", - "default": { - "secs": 2592000, - "nanos": 0 - }, - "type": "string", - "nullable": true - }, - "urls": { - "description": "List of URLs to the Redis cluster", - "type": "array", - "items": { - "type": "string", - "format": "uri" - } - }, - "username": { - "description": "Redis username if not provided in the URLs. 
This field takes precedence over the username in the URL", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "QueryPlanning": { - "description": "Query planning cache configuration", - "type": "object", - "properties": { - "cache": { - "description": "#/definitions/QueryPlanCache", - "$ref": "#/definitions/QueryPlanCache" - }, - "experimental_paths_limit": { - "description": "Before creating query plans, for each path of fields in the query we compute all the possible options to traverse that path via the subgraphs. Multiple options can arise because fields in the path can be provided by multiple subgraphs, and abstract types (i.e. unions and interfaces) returned by fields sometimes require the query planner to traverse through each constituent object type. The number of options generated in this computation can grow large if the schema or query are sufficiently complex, and that will increase the time spent planning.\n\nThis config allows specifying a per-path limit to the number of options considered. If any path's options exceeds this limit, query planning will abort and the operation will fail.\n\nThe default value is None, which specifies no limit.", - "default": null, - "type": "integer", - "format": "uint32", - "minimum": 0.0, - "nullable": true - }, - "experimental_plans_limit": { - "description": "Sets a limit to the number of generated query plans. The planning process generates many different query plans as it explores the graph, and the list can grow large. By using this limit, we prevent that growth and still get a valid query plan, but it may not be the optimal one.\n\nThe default limit is set to 10000, but it may change in the future", - "default": null, - "type": "integer", - "format": "uint32", - "minimum": 0.0, - "nullable": true - }, - "experimental_reuse_query_plans": { - "description": "If cache warm up is configured, this will allow the router to keep a query plan created with the old schema, if it determines that the schema update does not affect the corresponding query", - "default": false, - "type": "boolean" - }, - "warmed_up_queries": { - "description": "Warms up the cache on reloads by running the query plan over a list of the most used queries (from the in memory cache) Configures the number of queries warmed up. 
Defaults to 1/3 of the in memory cache", - "default": null, - "type": "integer", - "format": "uint", - "minimum": 0.0, - "nullable": true - } - }, - "additionalProperties": false - }, - "RateLimit": { - "type": "object", - "properties": { - "capacity": { - "description": "Number of log lines allowed in interval per message", - "default": 1, - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "enabled": { - "description": "Set to true to limit the rate of log messages", - "default": false, - "type": "boolean" - }, - "interval": { - "description": "Interval for rate limiting", - "default": { - "secs": 1, - "nanos": 0 - }, - "type": "string" - } - }, - "additionalProperties": false - }, - "RateLimitConf": { - "type": "object", - "required": [ - "capacity", - "interval" - ], - "properties": { - "capacity": { - "description": "Number of requests allowed", - "type": "integer", - "format": "uint64", - "minimum": 1.0 - }, - "interval": { - "description": "Per interval", - "type": "string" - } - }, - "additionalProperties": false - }, - "ReadinessConfig": { - "description": "Configuration options pertaining to the readiness health sub-component.", - "type": "object", - "properties": { - "allowed": { - "description": "How many rejections are allowed in an interval (default: 100) If this number is exceeded, the router will start to report unready.", - "default": 100, - "type": "integer", - "format": "uint", - "minimum": 0.0 - }, - "interval": { - "description": "#/definitions/ReadinessIntervalConfig", - "$ref": "#/definitions/ReadinessIntervalConfig" - } - }, - "additionalProperties": false - }, - "ReadinessIntervalConfig": { - "description": "Configuration options pertaining to the readiness health interval sub-component.", - "type": "object", - "properties": { - "sampling": { - "description": "The sampling interval (default: 5s)", - "default": "0s", - "type": "string", - "nullable": true - }, - "unready": { - "description": "The unready interval (default: 2 * sampling interval)", - "default": null, - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "RecordConfig": { - "description": "Request recording configuration.", - "type": "object", - "required": [ - "enabled" - ], - "properties": { - "enabled": { - "description": "The recording plugin is disabled by default.", - "type": "boolean" - }, - "storage_path": { - "description": "The path to the directory where recordings will be stored. Defaults to the current working directory.", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "RedisCache": { - "description": "Redis cache configuration", - "type": "object", - "required": [ - "urls" - ], - "properties": { - "namespace": { - "description": "namespace used to prefix Redis keys", - "type": "string", - "nullable": true - }, - "password": { - "description": "Redis password if not provided in the URLs. 
This field takes precedence over the password in the URL", - "type": "string", - "nullable": true - }, - "pool_size": { - "description": "The size of the Redis connection pool", - "default": 1, - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "required_to_start": { - "description": "Prevents the router from starting if it cannot connect to Redis", - "default": false, - "type": "boolean" - }, - "reset_ttl": { - "description": "When a TTL is set on a key, reset it when reading the data from that key", - "default": true, - "type": "boolean" - }, - "timeout": { - "description": "Redis request timeout (default: 2ms)", - "default": null, - "type": "string", - "nullable": true - }, - "tls": { - "description": "#/definitions/TlsClient", - "$ref": "#/definitions/TlsClient", - "nullable": true - }, - "ttl": { - "description": "TTL for entries", - "default": null, - "type": "string", - "nullable": true - }, - "urls": { - "description": "List of URLs to the Redis cluster", - "type": "array", - "items": { - "type": "string", - "format": "uri" - } - }, - "username": { - "description": "Redis username if not provided in the URLs. This field takes precedence over the username in the URL", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "Remove": { - "description": "Remove header", - "oneOf": [ - { - "description": "Remove a header given a header name", - "type": "object", - "required": [ - "named" - ], - "properties": { - "named": { - "description": "Remove a header given a header name", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "description": "Remove a header given a regex matching header name", - "type": "object", - "required": [ - "matching" - ], - "properties": { - "matching": { - "description": "Remove a header given a regex matching against the header name", - "type": "string" - } - }, - "additionalProperties": false - } - ] - }, - "RequestPropagation": { - "type": "object", - "required": [ - "header_name" - ], - "properties": { - "format": { - "description": "#/definitions/TraceIdFormat", - "$ref": "#/definitions/TraceIdFormat" - }, - "header_name": { - "description": "Choose the header name to expose trace_id (default: apollo-trace-id)", - "type": "string" - } - }, - "additionalProperties": false - }, - "ResponseStatus": { - "oneOf": [ - { - "description": "The http status code.", - "type": "string", - "enum": [ - "code" - ] - }, - { - "description": "The http status reason.", - "type": "string", - "enum": [ - "reason" - ] - } - ] - }, - "Router": { - "description": "Router level (APQ) configuration", - "type": "object", - "properties": { - "cache": { - "description": "#/definitions/Cache", - "$ref": "#/definitions/Cache" - } - }, - "additionalProperties": false - }, - "RouterAttributes": { - "description": "Common attributes for http server and client. 
See https://opentelemetry.io/docs/specs/semconv/http/http-spans/#common-attributes", - "type": "object", - "properties": { - "baggage": { - "description": "All key values from trace baggage.", - "default": null, - "type": "boolean", - "nullable": true - }, - "dd.trace_id": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "error.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.request.body.size": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.request.method": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.response.body.size": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.response.status_code": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.route": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.local.address": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.local.port": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.peer.address": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.peer.port": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.protocol.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.protocol.version": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.transport": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "server.address": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "server.port": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "trace_id": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "url.path": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "url.query": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "url.scheme": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "user_agent.original": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": false - }, - "RouterConf": { - "type": "object", - "required": [ - "jwt" - ], - 
"properties": { - "jwt": { - "description": "#/definitions/JWTConf", - "$ref": "#/definitions/JWTConf" - } - }, - "additionalProperties": false - }, - "RouterEventsConfig": { - "type": "object", - "properties": { - "error": { - "description": "#/definitions/StandardEventConfig_for_RouterSelector", - "$ref": "#/definitions/StandardEventConfig_for_RouterSelector" - }, - "request": { - "description": "#/definitions/StandardEventConfig_for_RouterSelector", - "$ref": "#/definitions/StandardEventConfig_for_RouterSelector" - }, - "response": { - "description": "#/definitions/StandardEventConfig_for_RouterSelector", - "$ref": "#/definitions/StandardEventConfig_for_RouterSelector" - } - }, - "additionalProperties": false - }, - "RouterInstrumentsConfig": { - "type": "object", - "properties": { - "http.server.active_requests": { - "description": "#/definitions/DefaultedStandardInstrument_for_ActiveRequestsAttributes", - "$ref": "#/definitions/DefaultedStandardInstrument_for_ActiveRequestsAttributes" - }, - "http.server.request.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector" - }, - "http.server.request.duration": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector" - }, - "http.server.response.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector" - } - }, - "additionalProperties": false - }, - "RouterRequestConf": { - "description": "What information is passed to a router request/response stage", - "type": "object", - "properties": { - "body": { - "description": "Send the body", - "default": false, - "type": "boolean" - }, - "condition": { - "description": "#/definitions/Condition_for_RouterSelector", - "$ref": "#/definitions/Condition_for_RouterSelector", - "nullable": true - }, - "context": { - "description": "#/definitions/ContextConf", - "$ref": "#/definitions/ContextConf" - }, - "headers": { - "description": "Send the headers", - "default": false, - "type": "boolean" - }, - "method": { - "description": "Send the method", - "default": false, - "type": "boolean" - }, - "path": { - "description": "Send the path", - "default": false, - "type": "boolean" - }, - "sdl": { - "description": "Send the SDL", - "default": false, - "type": "boolean" - } - }, - 
"additionalProperties": false - }, - "RouterResponseConf": { - "description": "What information is passed to a router request/response stage", - "type": "object", - "properties": { - "body": { - "description": "Send the body", - "default": false, - "type": "boolean" - }, - "condition": { - "description": "#/definitions/Condition_for_RouterSelector", - "$ref": "#/definitions/Condition_for_RouterSelector", - "nullable": true - }, - "context": { - "description": "#/definitions/ContextConf", - "$ref": "#/definitions/ContextConf" - }, - "headers": { - "description": "Send the headers", - "default": false, - "type": "boolean" - }, - "sdl": { - "description": "Send the SDL", - "default": false, - "type": "boolean" - }, - "status_code": { - "description": "Send the HTTP status", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "RouterSelector": { - "anyOf": [ - { - "description": "A header from the request", - "type": "object", - "required": [ - "request_header" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "request_header": { - "description": "The name of the request header.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "description": "The request method.", - "type": "object", - "required": [ - "request_method" - ], - "properties": { - "request_method": { - "description": "The request method enabled or not", - "type": "boolean" - } - }, - "additionalProperties": false - }, - { - "description": "A value from context.", - "type": "object", - "required": [ - "request_context" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "request_context": { - "description": "The request context key.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "description": "A header from the response", - "type": "object", - "required": [ - "response_header" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "response_header": { - "description": "The name of the request header.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "description": "A status from the response", - "type": "object", - "required": [ - "response_status" - ], - "properties": { - "response_status": { - "description": "#/definitions/ResponseStatus", - "$ref": "#/definitions/ResponseStatus" - } - }, - "additionalProperties": false - }, - { - "description": "The trace ID of the request.", - "type": "object", - "required": [ - "trace_id" - ], - "properties": { - "trace_id": { - "description": "#/definitions/TraceIdFormat", - "$ref": "#/definitions/TraceIdFormat" - } - }, - "additionalProperties": false - }, - { - "description": "Apollo Studio operation id", - "type": "object", - "required": [ - "studio_operation_id" - ], - "properties": { - "studio_operation_id": { - "description": "Apollo Studio operation id", - "type": "boolean" - } - }, - "additionalProperties": false - }, - { - "description": "A value from context.", - "type": "object", - "required": [ - "response_context" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "response_context": { - "description": "The response context key.", - "type": "string" - } - }, - 
"additionalProperties": false - }, - { - "description": "The operation name from the query.", - "type": "object", - "required": [ - "operation_name" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "operation_name": { - "description": "#/definitions/OperationName", - "$ref": "#/definitions/OperationName" - } - }, - "additionalProperties": false - }, - { - "description": "A value from baggage.", - "type": "object", - "required": [ - "baggage" - ], - "properties": { - "baggage": { - "description": "The name of the baggage item.", - "type": "string" - }, - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - } - }, - "additionalProperties": false - }, - { - "description": "A value from an environment variable.", - "type": "object", - "required": [ - "env" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "env": { - "description": "The name of the environment variable", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "description": "Deprecated, should not be used anymore, use static field instead", - "type": "string" - }, - { - "type": "object", - "required": [ - "static" - ], - "properties": { - "static": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "on_graphql_error" - ], - "properties": { - "on_graphql_error": { - "description": "Boolean set to true if the response body contains graphql error", - "type": "boolean" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "error" - ], - "properties": { - "error": { - "description": "#/definitions/ErrorRepr", - "$ref": "#/definitions/ErrorRepr" - } - }, - "additionalProperties": false - } - ] - }, - "RouterShaping": { - "type": "object", - "properties": { - "concurrency_limit": { - "description": "The global concurrency limit", - "type": "integer", - "format": "uint", - "minimum": 0.0, - "nullable": true - }, - "global_rate_limit": { - "description": "#/definitions/RateLimitConf", - "$ref": "#/definitions/RateLimitConf", - "nullable": true - }, - "timeout": { - "description": "Enable timeout for incoming requests", - "default": null, - "type": "string" - } - }, - "additionalProperties": false - }, - "RouterSpans": { - "type": "object", - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional" - } - }, - "additionalProperties": false - }, - "RouterStage": { - "type": "object", - "properties": { - "request": { - "description": "#/definitions/RouterRequestConf", - "$ref": "#/definitions/RouterRequestConf" - }, - "response": { - "description": "#/definitions/RouterResponseConf", - "$ref": "#/definitions/RouterResponseConf" - } - } - }, - "RouterValue": { - "anyOf": [ - { - "description": "#/definitions/Standard", - "$ref": "#/definitions/Standard" - }, - { - "description": "#/definitions/RouterSelector", - "$ref": "#/definitions/RouterSelector" - } - ] 
- }, - "Sampler": { - "oneOf": [ - { - "description": "Always sample", - "type": "string", - "enum": [ - "always_on" - ] - }, - { - "description": "Never sample", - "type": "string", - "enum": [ - "always_off" - ] - } - ] - }, - "SamplerOption": { - "anyOf": [ - { - "description": "Sample a given fraction. Fractions >= 1 will always sample.", - "type": "number", - "format": "double" - }, - { - "description": "#/definitions/Sampler", - "$ref": "#/definitions/Sampler" - } - ] - }, - "Sandbox": { - "description": "Configuration options pertaining to the sandbox page.", - "type": "object", - "properties": { - "enabled": { - "description": "Set to true to enable sandbox", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "SelectorOrValue_for_ConnectorSelector": { - "anyOf": [ - { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - }, - { - "description": "#/definitions/ConnectorSelector", - "$ref": "#/definitions/ConnectorSelector" - } - ] - }, - "SelectorOrValue_for_GraphQLSelector": { - "anyOf": [ - { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - }, - { - "description": "#/definitions/GraphQLSelector", - "$ref": "#/definitions/GraphQLSelector" - } - ] - }, - "SelectorOrValue_for_RouterSelector": { - "anyOf": [ - { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - }, - { - "description": "#/definitions/RouterSelector", - "$ref": "#/definitions/RouterSelector" - } - ] - }, - "SelectorOrValue_for_SubgraphSelector": { - "anyOf": [ - { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - }, - { - "description": "#/definitions/SubgraphSelector", - "$ref": "#/definitions/SubgraphSelector" - } - ] - }, - "SelectorOrValue_for_SupergraphSelector": { - "anyOf": [ - { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - }, - { - "description": "#/definitions/SupergraphSelector", - "$ref": "#/definitions/SupergraphSelector" - } - ] - }, - "Server": { - "type": "object", - "properties": { - "http": { - "description": "#/definitions/ServerHttpConfig", - "$ref": "#/definitions/ServerHttpConfig" - } - }, - "additionalProperties": false - }, - "ServerHttpConfig": { - "description": "Configuration for HTTP", - "type": "object", - "properties": { - "header_read_timeout": { - "description": "Header read timeout in human-readable format; defaults to 10s", - "default": { - "secs": 10, - "nanos": 0 - }, - "type": "string" - } - }, - "additionalProperties": false - }, - "Source": { - "oneOf": [ - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "name": { - "description": "HTTP header expected to contain JWT", - "default": "authorization", - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "header" - ] - }, - "value_prefix": { - "description": "Header value prefix", - "default": "Bearer", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "name", - "type" - ], - "properties": { - "name": { - "description": "Name of the cookie containing the JWT", - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "cookie" - ] - } - }, - "additionalProperties": false - } - ] - }, - "SourceConfiguration": { - "description": "Configuration for a `@source` directive", - "type": "object", - "properties": { - "$config": { - "description": "Other values that can be used by connectors via 
`{$config.}`", - "default": {}, - "type": "object", - "additionalProperties": true - }, - "max_requests_per_operation": { - "description": "The maximum number of requests for this source", - "default": null, - "type": "integer", - "format": "uint", - "minimum": 0.0, - "nullable": true - }, - "override_url": { - "description": "Override the `@source(http: {baseURL:})`", - "default": null, - "type": "string", - "format": "uri", - "nullable": true - } - }, - "additionalProperties": false - }, - "SpanMode": { - "description": "Span mode to create new or deprecated spans", - "oneOf": [ - { - "description": "Keep the request span as root span and deprecated attributes. This option will eventually removed.", - "type": "string", - "enum": [ - "deprecated" - ] - }, - { - "description": "Use new OpenTelemetry spec compliant span attributes or preserve existing. This will be the default in future.", - "type": "string", - "enum": [ - "spec_compliant" - ] - } - ] - }, - "Spans": { - "type": "object", - "properties": { - "connector": { - "description": "#/definitions/ConnectorSpans", - "$ref": "#/definitions/ConnectorSpans" - }, - "default_attribute_requirement_level": { - "description": "#/definitions/DefaultAttributeRequirementLevel", - "$ref": "#/definitions/DefaultAttributeRequirementLevel" - }, - "mode": { - "description": "#/definitions/SpanMode", - "$ref": "#/definitions/SpanMode" - }, - "router": { - "description": "#/definitions/RouterSpans", - "$ref": "#/definitions/RouterSpans" - }, - "subgraph": { - "description": "#/definitions/SubgraphSpans", - "$ref": "#/definitions/SubgraphSpans" - }, - "supergraph": { - "description": "#/definitions/SupergraphSpans", - "$ref": "#/definitions/SupergraphSpans" - } - }, - "additionalProperties": false - }, - "Standard": { - "type": "string", - "enum": [ - "duration", - "unit" - ] - }, - "StandardAttribute": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "object", - "required": [ - "alias" - ], - "properties": { - "alias": { - "type": "string" - } - }, - "additionalProperties": false - } - ] - }, - "StandardEventConfig_for_ConnectorSelector": { - "anyOf": [ - { - "description": "#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - }, - { - "type": "object", - "required": [ - "condition", - "level" - ], - "properties": { - "condition": { - "description": "#/definitions/Condition_for_ConnectorSelector", - "$ref": "#/definitions/Condition_for_ConnectorSelector" - }, - "level": { - "description": "#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - } - } - } - ] - }, - "StandardEventConfig_for_RouterSelector": { - "anyOf": [ - { - "description": "#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - }, - { - "type": "object", - "required": [ - "condition", - "level" - ], - "properties": { - "condition": { - "description": "#/definitions/Condition_for_RouterSelector", - "$ref": "#/definitions/Condition_for_RouterSelector" - }, - "level": { - "description": "#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - } - } - } - ] - }, - "StandardEventConfig_for_SubgraphSelector": { - "anyOf": [ - { - "description": "#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - }, - { - "type": "object", - "required": [ - "condition", - "level" - ], - "properties": { - "condition": { - "description": "#/definitions/Condition_for_SubgraphSelector", - "$ref": "#/definitions/Condition_for_SubgraphSelector" - }, - "level": { - "description": 
"#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - } - } - } - ] - }, - "StandardEventConfig_for_SupergraphSelector": { - "anyOf": [ - { - "description": "#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - }, - { - "type": "object", - "required": [ - "condition", - "level" - ], - "properties": { - "condition": { - "description": "#/definitions/Condition_for_SupergraphSelector", - "$ref": "#/definitions/Condition_for_SupergraphSelector" - }, - "level": { - "description": "#/definitions/EventLevelConfig", - "$ref": "#/definitions/EventLevelConfig" - } - } - } - ] - }, - "StandardUnit": { - "type": "string", - "enum": [ - "unit" - ] - }, - "StdOut": { - "type": "object", - "properties": { - "enabled": { - "description": "Set to true to log to stdout.", - "default": true, - "type": "boolean" - }, - "format": { - "description": "#/definitions/logging_format", - "$ref": "#/definitions/logging_format" - }, - "rate_limit": { - "description": "#/definitions/RateLimit", - "$ref": "#/definitions/RateLimit" - }, - "tty_format": { - "description": "#/definitions/logging_format", - "$ref": "#/definitions/logging_format", - "nullable": true - } - }, - "additionalProperties": false - }, - "StrategyConfig": { - "description": "Algorithm for calculating the cost of an incoming query.", - "oneOf": [ - { - "description": "A simple, statically-defined cost mapping for operations and types.\n\nOperation costs: - Mutation: 10 - Query: 0 - Subscription 0\n\nType costs: - Object: 1 - Interface: 1 - Union: 1 - Scalar: 0 - Enum: 0", - "type": "object", - "required": [ - "static_estimated" - ], - "properties": { - "static_estimated": { - "type": "object", - "required": [ - "list_size", - "max" - ], - "properties": { - "list_size": { - "description": "The assumed length of lists returned by the operation.", - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "max": { - "description": "The maximum cost of a query", - "type": "number", - "format": "double" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - ] - }, - "Subgraph": { - "description": "Per subgraph configuration for entity caching", - "type": "object", - "properties": { - "enabled": { - "description": "activates caching for this subgraph, overrides the global configuration", - "default": true, - "type": "boolean", - "nullable": true - }, - "invalidation": { - "description": "#/definitions/SubgraphInvalidationConfig", - "$ref": "#/definitions/SubgraphInvalidationConfig", - "nullable": true - }, - "private_id": { - "description": "Context key used to separate cache sections per user", - "default": null, - "type": "string", - "nullable": true - }, - "redis": { - "description": "#/definitions/RedisCache", - "$ref": "#/definitions/RedisCache", - "nullable": true - }, - "ttl": { - "description": "#/definitions/Ttl", - "$ref": "#/definitions/Ttl", - "nullable": true - } - }, - "additionalProperties": false - }, - "SubgraphApq": { - "description": "Subgraph level Automatic Persisted Queries (APQ) configuration", - "type": "object", - "properties": { - "enabled": { - "description": "Enable", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "SubgraphAttributes": { - "type": "object", - "properties": { - "http.request.resend_count": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.graphql.document": { - "description": 
"#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.graphql.operation.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.graphql.operation.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": false - }, - "SubgraphConfig": { - "anyOf": [ - { - "description": "Enable or disable error redaction for a subgraph", - "type": "boolean" - }, - { - "description": "Allow specific error extension keys for a subgraph", - "type": "object", - "required": [ - "allow_extensions_keys" - ], - "properties": { - "allow_extensions_keys": { - "description": "Allow specific extension keys for a subgraph. Will extending global allow list or override a global deny list", - "type": "array", - "items": { - "type": "string" - } - }, - "exclude_global_keys": { - "description": "Exclude specific extension keys from global allow/deny list", - "default": [], - "type": "array", - "items": { - "type": "string" - } - }, - "redact_message": { - "description": "Redact error messages for a subgraph", - "type": "boolean", - "nullable": true - } - } - }, - { - "description": "Deny specific error extension keys for a subgraph", - "type": "object", - "required": [ - "deny_extensions_keys" - ], - "properties": { - "deny_extensions_keys": { - "description": "Allow specific extension keys for a subgraph. Will extending global deny list or override a global allow list", - "type": "array", - "items": { - "type": "string" - } - }, - "exclude_global_keys": { - "description": "Exclude specific extension keys from global allow/deny list", - "default": [], - "type": "array", - "items": { - "type": "string" - } - }, - "redact_message": { - "description": "Redact error messages for a subgraph", - "type": "boolean", - "nullable": true - } - } - }, - { - "description": "Override global configuration, but don't allow or deny any new keys explicitly", - "type": "object", - "properties": { - "exclude_global_keys": { - "description": "Exclude specific extension keys from global allow/deny list", - "default": [], - "type": "array", - "items": { - "type": "string" - } - }, - "redact_message": { - "description": "Redact error messages for a subgraph", - "type": "boolean", - "nullable": true - } - } - } - ] - }, - "SubgraphConfiguration_for_CommonBatchingConfig": { - "description": "Configuration options pertaining to the subgraph server component.", - "type": "object", - "properties": { - "all": { - "description": "#/definitions/CommonBatchingConfig", - "$ref": "#/definitions/CommonBatchingConfig" - }, - "subgraphs": { - "description": "per subgraph options", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/CommonBatchingConfig", - "$ref": "#/definitions/CommonBatchingConfig" - } - } - } - }, - "SubgraphConfiguration_for_Subgraph": { - "description": "Configuration options pertaining to the subgraph server component.", - "type": "object", - "properties": { - "all": { - "description": "#/definitions/Subgraph", - "$ref": "#/definitions/Subgraph" - }, - "subgraphs": { - "description": "per subgraph options", - "default": {}, - "type": "object", - "additionalProperties": { - "description": 
"#/definitions/Subgraph", - "$ref": "#/definitions/Subgraph" - } - } - } - }, - "SubgraphConfiguration_for_SubgraphApq": { - "description": "Configuration options pertaining to the subgraph server component.", - "type": "object", - "properties": { - "all": { - "description": "#/definitions/SubgraphApq", - "$ref": "#/definitions/SubgraphApq" - }, - "subgraphs": { - "description": "per subgraph options", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/SubgraphApq", - "$ref": "#/definitions/SubgraphApq" - } - } - } - }, - "SubgraphConfiguration_for_TlsClient": { - "description": "Configuration options pertaining to the subgraph server component.", - "type": "object", - "properties": { - "all": { - "description": "#/definitions/TlsClient", - "$ref": "#/definitions/TlsClient" - }, - "subgraphs": { - "description": "per subgraph options", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/TlsClient", - "$ref": "#/definitions/TlsClient" - } - } - } - }, - "SubgraphConnectorConfiguration": { - "description": "Configuration for a connector subgraph", - "type": "object", - "properties": { - "$config": { - "description": "Other values that can be used by connectors via `{$config.}`", - "default": {}, - "type": "object", - "additionalProperties": true - }, - "sources": { - "description": "A map of `@source(name:)` to configuration for that source", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/SourceConfiguration", - "$ref": "#/definitions/SourceConfiguration" - } - } - }, - "additionalProperties": false - }, - "SubgraphErrorConfig": { - "type": "object", - "properties": { - "all": { - "description": "#/definitions/ErrorConfiguration", - "$ref": "#/definitions/ErrorConfiguration" - }, - "subgraphs": { - "description": "Handling of errors coming from specified subgraphs", - "type": "object", - "additionalProperties": { - "description": "#/definitions/ErrorConfiguration", - "$ref": "#/definitions/ErrorConfiguration" - } - } - }, - "additionalProperties": false - }, - "SubgraphEventsConfig": { - "type": "object", - "properties": { - "error": { - "description": "#/definitions/StandardEventConfig_for_SubgraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SubgraphSelector" - }, - "request": { - "description": "#/definitions/StandardEventConfig_for_SubgraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SubgraphSelector" - }, - "response": { - "description": "#/definitions/StandardEventConfig_for_SubgraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SubgraphSelector" - } - }, - "additionalProperties": false - }, - "SubgraphInstrumentsConfig": { - "type": "object", - "properties": { - "http.client.request.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - }, - "http.client.request.duration": { - "description": 
"#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - }, - "http.client.response.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - } - }, - "additionalProperties": false - }, - "SubgraphInvalidationConfig": { - "type": "object", - "properties": { - "enabled": { - "description": "Enable the invalidation", - "default": false, - "type": "boolean" - }, - "shared_key": { - "description": "Shared key needed to request the invalidation endpoint", - "default": "", - "type": "string" - } - }, - "additionalProperties": false - }, - "SubgraphPassthroughMode": { - "type": "object", - "properties": { - "all": { - "description": "#/definitions/WebSocketConfiguration", - "$ref": "#/definitions/WebSocketConfiguration", - "nullable": true - }, - "subgraphs": { - "description": "Configuration for specific subgraphs", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/WebSocketConfiguration", - "$ref": "#/definitions/WebSocketConfiguration" - } - } - }, - "additionalProperties": false - }, - "SubgraphQuery": { - "oneOf": [ - { - "description": "The raw query kind.", - "type": "string", - "enum": [ - "string" - ] - } - ] - }, - "SubgraphRequestConf": { - "description": "What information is passed to a subgraph request/response stage", - "type": "object", - "properties": { - "body": { - "description": "Send the body", - "default": false, - "type": "boolean" - }, - "condition": { - "description": "#/definitions/Condition_for_SubgraphSelector", - "$ref": "#/definitions/Condition_for_SubgraphSelector", - "nullable": true - }, - "context": { - "description": "#/definitions/ContextConf", - "$ref": "#/definitions/ContextConf" - }, - "headers": { - "description": "Send the headers", - "default": false, - "type": "boolean" - }, - "method": { - "description": "Send the method URI", - "default": false, - "type": "boolean" - }, - "service_name": { - "description": "Send the service name", - "default": false, - "type": "boolean" - }, - "subgraph_request_id": { - "description": "Send the subgraph request id", - "default": false, - "type": "boolean" - }, - "uri": { - "description": "Send the subgraph URI", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "SubgraphResponseConf": { - "description": "What information is passed to a subgraph request/response stage", - "type": "object", - "properties": { - "body": { - "description": "Send the body", - "default": false, - "type": "boolean" - }, - "condition": { - "description": "#/definitions/Condition_for_SubgraphSelector", - "$ref": "#/definitions/Condition_for_SubgraphSelector", - 
"nullable": true - }, - "context": { - "description": "#/definitions/ContextConf", - "$ref": "#/definitions/ContextConf" - }, - "headers": { - "description": "Send the headers", - "default": false, - "type": "boolean" - }, - "service_name": { - "description": "Send the service name", - "default": false, - "type": "boolean" - }, - "status_code": { - "description": "Send the http status", - "default": false, - "type": "boolean" - }, - "subgraph_request_id": { - "description": "Send the subgraph request id", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "SubgraphSelector": { - "anyOf": [ - { - "type": "object", - "required": [ - "subgraph_operation_name" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "subgraph_operation_name": { - "description": "#/definitions/OperationName", - "$ref": "#/definitions/OperationName" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "subgraph_operation_kind" - ], - "properties": { - "subgraph_operation_kind": { - "description": "#/definitions/OperationKind", - "$ref": "#/definitions/OperationKind" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "subgraph_name" - ], - "properties": { - "subgraph_name": { - "description": "The subgraph name", - "type": "boolean" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "subgraph_query" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "subgraph_query": { - "description": "#/definitions/SubgraphQuery", - "$ref": "#/definitions/SubgraphQuery" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "subgraph_query_variable" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "subgraph_query_variable": { - "description": "The name of a subgraph query variable.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "subgraph_response_data" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "subgraph_response_data": { - "description": "The subgraph response body json path.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "subgraph_response_errors" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "subgraph_response_errors": { - "description": "The subgraph response body json path.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "subgraph_request_header" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "subgraph_request_header": { - "description": "The name of a subgraph request header.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "subgraph_response_header" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "subgraph_response_header": { - "description": "The name of a subgraph response 
header.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "subgraph_response_status" - ], - "properties": { - "subgraph_response_status": { - "description": "#/definitions/ResponseStatus", - "$ref": "#/definitions/ResponseStatus" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "subgraph_resend_count" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "subgraph_resend_count": { - "description": "The subgraph http resend count", - "type": "boolean" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "supergraph_operation_name" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "supergraph_operation_name": { - "description": "#/definitions/OperationName", - "$ref": "#/definitions/OperationName" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "supergraph_operation_kind" - ], - "properties": { - "supergraph_operation_kind": { - "description": "#/definitions/OperationKind", - "$ref": "#/definitions/OperationKind" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "supergraph_query" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "supergraph_query": { - "description": "#/definitions/Query", - "$ref": "#/definitions/Query" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "supergraph_query_variable" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "supergraph_query_variable": { - "description": "The supergraph query variable name.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "supergraph_request_header" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "supergraph_request_header": { - "description": "The supergraph request header name.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "request_context" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "request_context": { - "description": "The request context key.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "response_context" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "response_context": { - "description": "The response context key.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "subgraph_on_graphql_error" - ], - "properties": { - "subgraph_on_graphql_error": { - "description": "Boolean set to true if the response body contains graphql error", - "type": "boolean" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "baggage" - ], - "properties": { - "baggage": { - "description": "The name of the baggage item.", - "type": "string" - }, - "default": { - "description": 
"#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "env" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "env": { - "description": "The name of the environment variable", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "description": "Deprecated, should not be used anymore, use static field instead", - "type": "string" - }, - { - "type": "object", - "required": [ - "static" - ], - "properties": { - "static": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "error" - ], - "properties": { - "error": { - "description": "#/definitions/ErrorRepr", - "$ref": "#/definitions/ErrorRepr" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "cache" - ], - "properties": { - "cache": { - "description": "#/definitions/CacheKind", - "$ref": "#/definitions/CacheKind" - }, - "entity_type": { - "description": "#/definitions/EntityType", - "$ref": "#/definitions/EntityType", - "nullable": true - } - }, - "additionalProperties": false - } - ] - }, - "SubgraphShaping": { - "description": "Traffic shaping options", - "type": "object", - "properties": { - "compression": { - "description": "#/definitions/Compression", - "$ref": "#/definitions/Compression", - "nullable": true - }, - "deduplicate_query": { - "description": "Enable query deduplication", - "type": "boolean", - "nullable": true - }, - "dns_resolution_strategy": { - "description": "#/definitions/DnsResolutionStrategy", - "$ref": "#/definitions/DnsResolutionStrategy", - "nullable": true - }, - "experimental_http2": { - "description": "#/definitions/Http2Config", - "$ref": "#/definitions/Http2Config", - "nullable": true - }, - "global_rate_limit": { - "description": "#/definitions/RateLimitConf", - "$ref": "#/definitions/RateLimitConf", - "nullable": true - }, - "timeout": { - "description": "Enable timeout for incoming requests", - "default": null, - "type": "string" - } - }, - "additionalProperties": false - }, - "SubgraphSpans": { - "type": "object", - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional" - } - }, - "additionalProperties": false - }, - "SubgraphStage": { - "description": "What information is passed to a subgraph request/response stage", - "type": "object", - "properties": { - "request": { - "description": "#/definitions/SubgraphRequestConf", - "$ref": "#/definitions/SubgraphRequestConf" - }, - "response": { - "description": "#/definitions/SubgraphResponseConf", - "$ref": "#/definitions/SubgraphResponseConf" - } - }, - "additionalProperties": false - }, - "SubgraphStages": { - "description": "What information is passed to a subgraph request/response stage", - "type": "object", - "properties": { - "all": { - "description": "#/definitions/SubgraphStage", - "$ref": "#/definitions/SubgraphStage" - } - }, - "additionalProperties": false - }, - 
"SubgraphValue": { - "anyOf": [ - { - "description": "#/definitions/Standard", - "$ref": "#/definitions/Standard" - }, - { - "description": "#/definitions/SubgraphSelector", - "$ref": "#/definitions/SubgraphSelector" - } - ] - }, - "SubscriptionConfig": { - "description": "Subscriptions configuration", - "type": "object", - "properties": { - "enable_deduplication": { - "description": "Enable the deduplication of subscription (for example if we detect the exact same request to subgraph we won't open a new websocket to the subgraph in passthrough mode) (default: true)", - "default": true, - "type": "boolean" - }, - "enabled": { - "description": "Enable subscription", - "default": true, - "type": "boolean" - }, - "max_opened_subscriptions": { - "description": "This is a limit to only have maximum X opened subscriptions at the same time. By default if it's not set there is no limit.", - "default": null, - "type": "integer", - "format": "uint", - "minimum": 0.0, - "nullable": true - }, - "mode": { - "description": "#/definitions/SubscriptionModeConfig", - "$ref": "#/definitions/SubscriptionModeConfig" - }, - "queue_capacity": { - "description": "It represent the capacity of the in memory queue to know how many events we can keep in a buffer", - "default": null, - "type": "integer", - "format": "uint", - "minimum": 0.0, - "nullable": true - } - }, - "additionalProperties": false - }, - "SubscriptionModeConfig": { - "type": "object", - "properties": { - "callback": { - "description": "#/definitions/CallbackMode", - "$ref": "#/definitions/CallbackMode", - "nullable": true - }, - "passthrough": { - "description": "#/definitions/SubgraphPassthroughMode", - "$ref": "#/definitions/SubgraphPassthroughMode", - "nullable": true - } - }, - "additionalProperties": false - }, - "Supergraph": { - "description": "Configuration options pertaining to the supergraph server component.", - "type": "object", - "properties": { - "connection_shutdown_timeout": { - "description": "The timeout for shutting down connections during a router shutdown or a schema reload.", - "default": { - "secs": 60, - "nanos": 0 - }, - "type": "string" - }, - "defer_support": { - "description": "Set to false to disable defer support", - "default": true, - "type": "boolean" - }, - "early_cancel": { - "description": "abort request handling when the client drops the connection. Default: false. When set to true, some parts of the request pipeline like telemetry will not work properly, but request handling will stop immediately when the client connection is closed.", - "default": false, - "type": "boolean" - }, - "experimental_log_on_broken_pipe": { - "description": "Log a message if the client closes the connection before the response is sent. Default: false.", - "default": false, - "type": "boolean" - }, - "generate_query_fragments": { - "description": "Enable QP generation of fragments for subgraph requests Default: true", - "default": true, - "type": "boolean" - }, - "introspection": { - "description": "Enable introspection Default: false", - "default": false, - "type": "boolean" - }, - "listen": { - "description": "#/definitions/ListenAddr", - "$ref": "#/definitions/ListenAddr" - }, - "path": { - "description": "The HTTP path on which GraphQL requests will be served. 
default: \"/\"", - "default": "/", - "type": "string" - }, - "query_planning": { - "description": "#/definitions/QueryPlanning", - "$ref": "#/definitions/QueryPlanning" - } - }, - "additionalProperties": false - }, - "SupergraphAttributes": { - "description": "Attributes for Cost", - "type": "object", - "properties": { - "cost.actual": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "cost.delta": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "cost.estimated": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "cost.result": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.document": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.operation.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.operation.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": false - }, - "SupergraphEventsConfig": { - "type": "object", - "properties": { - "error": { - "description": "#/definitions/StandardEventConfig_for_SupergraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SupergraphSelector" - }, - "request": { - "description": "#/definitions/StandardEventConfig_for_SupergraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SupergraphSelector" - }, - "response": { - "description": "#/definitions/StandardEventConfig_for_SupergraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SupergraphSelector" - } - }, - "additionalProperties": false - }, - "SupergraphInstrumentsConfig": { - "type": "object", - "properties": { - "cost.actual": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector" - }, - "cost.delta": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector" - }, - "cost.estimated": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector", - "$ref": 
"#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector" - } - }, - "additionalProperties": false - }, - "SupergraphRequestConf": { - "description": "What information is passed to a router request/response stage", - "type": "object", - "properties": { - "body": { - "description": "Send the body", - "default": false, - "type": "boolean" - }, - "condition": { - "description": "#/definitions/Condition_for_SupergraphSelector", - "$ref": "#/definitions/Condition_for_SupergraphSelector", - "nullable": true - }, - "context": { - "description": "#/definitions/ContextConf", - "$ref": "#/definitions/ContextConf" - }, - "headers": { - "description": "Send the headers", - "default": false, - "type": "boolean" - }, - "method": { - "description": "Send the method", - "default": false, - "type": "boolean" - }, - "sdl": { - "description": "Send the SDL", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "SupergraphResponseConf": { - "description": "What information is passed to a router request/response stage", - "type": "object", - "properties": { - "body": { - "description": "Send the body", - "default": false, - "type": "boolean" - }, - "condition": { - "description": "#/definitions/Condition_for_SupergraphSelector", - "$ref": "#/definitions/Condition_for_SupergraphSelector", - "nullable": true - }, - "context": { - "description": "#/definitions/ContextConf", - "$ref": "#/definitions/ContextConf" - }, - "headers": { - "description": "Send the headers", - "default": false, - "type": "boolean" - }, - "sdl": { - "description": "Send the SDL", - "default": false, - "type": "boolean" - }, - "status_code": { - "description": "Send the HTTP status", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false - }, - "SupergraphSelector": { - "anyOf": [ - { - "type": "object", - "required": [ - "operation_name" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "operation_name": { - "description": "#/definitions/OperationName", - "$ref": "#/definitions/OperationName" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "operation_kind" - ], - "properties": { - "operation_kind": { - "description": "#/definitions/OperationKind", - "$ref": "#/definitions/OperationKind" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "query" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "query": { - "description": "#/definitions/Query", - "$ref": "#/definitions/Query" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "query_variable" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "query_variable": { - "description": "The name of a graphql query variable.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "request_header" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "request_header": { - "description": "The name of the request header.", - "type": "string" - } - }, - 
"additionalProperties": false - }, - { - "type": "object", - "required": [ - "response_header" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "response_header": { - "description": "The name of the response header.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "description": "A status from the response", - "type": "object", - "required": [ - "response_status" - ], - "properties": { - "response_status": { - "description": "#/definitions/ResponseStatus", - "$ref": "#/definitions/ResponseStatus" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "request_context" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "request_context": { - "description": "The request context key.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "response_context" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "response_context": { - "description": "The response context key.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "response_data" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "response_data": { - "description": "The supergraph response body json path of the chunks.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "response_errors" - ], - "properties": { - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - }, - "response_errors": { - "description": "The supergraph response body json path of the chunks.", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "baggage" - ], - "properties": { - "baggage": { - "description": "The name of the baggage item.", - "type": "string" - }, - "default": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue", - "nullable": true - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "env" - ], - "properties": { - "default": { - "description": "Optional default value.", - "type": "string", - "nullable": true - }, - "env": { - "description": "The name of the environment variable", - "type": "string" - } - }, - "additionalProperties": false - }, - { - "description": "Deprecated, should not be used anymore, use static field instead", - "type": "string" - }, - { - "type": "object", - "required": [ - "static" - ], - "properties": { - "static": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "on_graphql_error" - ], - "properties": { - "on_graphql_error": { - "description": "Boolean set to true if the response body contains graphql error", - "type": "boolean" - } - }, - "additionalProperties": false - }, - { - "type": "object", - "required": [ - "error" - ], - "properties": { - "error": { - "description": "#/definitions/ErrorRepr", - "$ref": "#/definitions/ErrorRepr" - } - }, - "additionalProperties": false - }, - { - 
"description": "Cost attributes", - "type": "object", - "required": [ - "cost" - ], - "properties": { - "cost": { - "description": "#/definitions/CostValue", - "$ref": "#/definitions/CostValue" - } - }, - "additionalProperties": false - }, - { - "description": "Boolean returning true if it's the primary response and not events like subscription events or deferred responses", - "type": "object", - "required": [ - "is_primary_response" - ], - "properties": { - "is_primary_response": { - "description": "Boolean returning true if it's the primary response and not events like subscription events or deferred responses", - "type": "boolean" - } - }, - "additionalProperties": false - } - ] - }, - "SupergraphSpans": { - "type": "object", - "properties": { - "attributes": { - "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional", - "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional" - } - }, - "additionalProperties": false - }, - "SupergraphStage": { - "type": "object", - "properties": { - "request": { - "description": "#/definitions/SupergraphRequestConf", - "$ref": "#/definitions/SupergraphRequestConf" - }, - "response": { - "description": "#/definitions/SupergraphResponseConf", - "$ref": "#/definitions/SupergraphResponseConf" - } - } - }, - "SupergraphValue": { - "anyOf": [ - { - "description": "#/definitions/Standard", - "$ref": "#/definitions/Standard" - }, - { - "description": "#/definitions/Event_for_SupergraphSelector", - "$ref": "#/definitions/Event_for_SupergraphSelector" - }, - { - "description": "#/definitions/SupergraphSelector", - "$ref": "#/definitions/SupergraphSelector" - } - ] - }, - "Temporality": { - "oneOf": [ - { - "description": "Export cumulative metrics.", - "type": "string", - "enum": [ - "cumulative" - ] - }, - { - "description": "Export delta metrics. 
`Delta` should be used when exporting to DataDog Agent.", - "type": "string", - "enum": [ - "delta" - ] - } - ] - }, - "Tls": { - "description": "TLS related configuration options.", - "type": "object", - "properties": { - "connector": { - "description": "#/definitions/ConnectorConfiguration_for_TlsClient", - "$ref": "#/definitions/ConnectorConfiguration_for_TlsClient" - }, - "subgraph": { - "description": "#/definitions/SubgraphConfiguration_for_TlsClient", - "$ref": "#/definitions/SubgraphConfiguration_for_TlsClient" - }, - "supergraph": { - "description": "#/definitions/TlsSupergraph", - "$ref": "#/definitions/TlsSupergraph", - "nullable": true - } - }, - "additionalProperties": false - }, - "TlsClient": { - "description": "Configuration options pertaining to the subgraph server component.", - "type": "object", - "properties": { - "certificate_authorities": { - "description": "list of certificate authorities in PEM format", - "default": null, - "type": "string", - "nullable": true - }, - "client_authentication": { - "description": "#/definitions/TlsClientAuth", - "$ref": "#/definitions/TlsClientAuth", - "nullable": true - } - }, - "additionalProperties": false - }, - "TlsClientAuth": { - "description": "TLS client authentication", - "type": "object", - "required": [ - "certificate_chain", - "key" - ], - "properties": { - "certificate_chain": { - "description": "list of certificates in PEM format", - "writeOnly": true, - "type": "string" - }, - "key": { - "description": "key in PEM format", - "writeOnly": true, - "type": "string" - } - }, - "additionalProperties": false - }, - "TlsSupergraph": { - "description": "Configuration options pertaining to the supergraph server component.", - "type": "object", - "required": [ - "certificate", - "certificate_chain", - "key" - ], - "properties": { - "certificate": { - "description": "server certificate in PEM format", - "writeOnly": true, - "type": "string" - }, - "certificate_chain": { - "description": "list of certificate authorities in PEM format", - "writeOnly": true, - "type": "string" - }, - "key": { - "description": "server key in PEM format", - "writeOnly": true, - "type": "string" - } - }, - "additionalProperties": false - }, - "TraceIdFormat": { - "oneOf": [ - { - "description": "Format the Trace ID as a hexadecimal number\n\n(e.g. Trace ID 16 -> 00000000000000000000000000000010)", - "type": "string", - "enum": [ - "hexadecimal" - ] - }, - { - "description": "Format the Trace ID as a hexadecimal number\n\n(e.g. Trace ID 16 -> 00000000000000000000000000000010)", - "type": "string", - "enum": [ - "open_telemetry" - ] - }, - { - "description": "Format the Trace ID as a decimal number\n\n(e.g. Trace ID 16 -> 16)", - "type": "string", - "enum": [ - "decimal" - ] - }, - { - "description": "Datadog", - "type": "string", - "enum": [ - "datadog" - ] - }, - { - "description": "UUID format with dashes (eg. 
67e55044-10b1-426f-9247-bb680e5fe0c8)", - "type": "string", - "enum": [ - "uuid" - ] - } - ] - }, - "Tracing": { - "description": "Tracing configuration", - "type": "object", - "properties": { - "common": { - "description": "#/definitions/TracingCommon", - "$ref": "#/definitions/TracingCommon" - }, - "datadog": { - "description": "#/definitions/Config16", - "$ref": "#/definitions/Config16" - }, - "experimental_response_trace_id": { - "description": "#/definitions/ExposeTraceId", - "$ref": "#/definitions/ExposeTraceId" - }, - "otlp": { - "description": "#/definitions/Config13", - "$ref": "#/definitions/Config13" - }, - "propagation": { - "description": "#/definitions/Propagation", - "$ref": "#/definitions/Propagation" - }, - "zipkin": { - "description": "#/definitions/Config15", - "$ref": "#/definitions/Config15" - } - }, - "additionalProperties": false - }, - "TracingCommon": { - "type": "object", - "properties": { - "max_attributes_per_event": { - "description": "The maximum attributes per event before discarding", - "default": 128, - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "max_attributes_per_link": { - "description": "The maximum attributes per link before discarding", - "default": 128, - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "max_attributes_per_span": { - "description": "The maximum attributes per span before discarding", - "default": 128, - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "max_events_per_span": { - "description": "The maximum events per span before discarding", - "default": 128, - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "max_links_per_span": { - "description": "The maximum links per span before discarding", - "default": 128, - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "parent_based_sampler": { - "description": "Whether to use parent based sampling", - "default": true, - "type": "boolean" - }, - "preview_datadog_agent_sampling": { - "description": "Use datadog agent sampling. 
This means that all spans will be sent to the Datadog agent and the `sampling.priority` attribute will be used to control if the span will then be sent to Datadog", - "default": null, - "type": "boolean", - "nullable": true - }, - "resource": { - "description": "The Open Telemetry resource", - "default": {}, - "type": "object", - "additionalProperties": { - "description": "#/definitions/AttributeValue", - "$ref": "#/definitions/AttributeValue" - } - }, - "sampler": { - "description": "#/definitions/SamplerOption", - "$ref": "#/definitions/SamplerOption" - }, - "service_name": { - "description": "The trace service name", - "default": null, - "type": "string", - "nullable": true - }, - "service_namespace": { - "description": "The trace service namespace", - "default": null, - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "Ttl": { - "description": "Per subgraph configuration for entity caching", - "type": "string" - }, - "TypeName": { - "oneOf": [ - { - "description": "The GraphQL type name", - "type": "string", - "enum": [ - "string" - ] - } - ] - }, - "UriEndpoint": { - "type": "string" - }, - "WebSocketConfiguration": { - "description": "WebSocket configuration for a specific subgraph", - "type": "object", - "properties": { - "heartbeat_interval": { - "description": "#/definitions/HeartbeatInterval", - "$ref": "#/definitions/HeartbeatInterval" - }, - "path": { - "description": "Path on which WebSockets are listening", - "default": null, - "type": "string", - "nullable": true - }, - "protocol": { - "description": "#/definitions/WebSocketProtocol", - "$ref": "#/definitions/WebSocketProtocol" - } - }, - "additionalProperties": false - }, - "WebSocketProtocol": { - "type": "string", - "enum": [ - "graphql_ws", - "graphql_transport_ws" - ] - }, - "conditional_attribute_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector": { - "anyOf": [ - { - "description": "#/definitions/ConnectorSelector", - "$ref": "#/definitions/ConnectorSelector" - }, - { - "properties": { - "condition": { - "description": "#/definitions/Condition_for_ConnectorSelector", - "$ref": "#/definitions/Condition_for_ConnectorSelector" - } - } - } - ] - }, - "conditional_attribute_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector": { - "anyOf": [ - { - "description": "#/definitions/RouterSelector", - "$ref": "#/definitions/RouterSelector" - }, - { - "properties": { - "condition": { - "description": "#/definitions/Condition_for_RouterSelector", - "$ref": "#/definitions/Condition_for_RouterSelector" - } - } - } - ] - }, - "conditional_attribute_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector": { - "anyOf": [ - { - "description": "#/definitions/SubgraphSelector", - "$ref": "#/definitions/SubgraphSelector" - }, - { - "properties": { - "condition": { - "description": "#/definitions/Condition_for_SubgraphSelector", - "$ref": "#/definitions/Condition_for_SubgraphSelector" - } - } - } - ] - }, - "conditional_attribute_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector": { - "anyOf": [ - { - "description": "#/definitions/SupergraphSelector", - "$ref": "#/definitions/SupergraphSelector" - }, - { - "properties": { - "condition": { - "description": "#/definitions/Condition_for_SupergraphSelector", - "$ref": "#/definitions/Condition_for_SupergraphSelector" - } - } - } - ] - }, - 
"extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::CacheInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument": { - "type": "object", - "properties": { - "apollo.router.operations.entity.cache": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - } - }, - "additionalProperties": { - "description": "#/definitions/Instrument_for_CacheAttributes_and_SubgraphSelector_and_SubgraphValue", - "$ref": "#/definitions/Instrument_for_CacheAttributes_and_SubgraphSelector_and_SubgraphValue" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector": { - "type": "object", - "properties": { - "graphql.type.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": { - "description": "#/definitions/SubgraphSelector", - "$ref": "#/definitions/SubgraphSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional": { - "type": "object", - "properties": { - "connector.http.method": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "connector.source.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "connector.url.template": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": { - "description": "#/definitions/conditional_attribute_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector", - "$ref": "#/definitions/conditional_attribute_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector": { - "type": "object", - "properties": { - "connector.http.method": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "connector.source.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "connector.url.template": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": { - "description": 
"#/definitions/ConnectorSelector", - "$ref": "#/definitions/ConnectorSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::events::ConnectorEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event": { - "type": "object", - "properties": { - "error": { - "description": "#/definitions/StandardEventConfig_for_ConnectorSelector", - "$ref": "#/definitions/StandardEventConfig_for_ConnectorSelector" - }, - "request": { - "description": "#/definitions/StandardEventConfig_for_ConnectorSelector", - "$ref": "#/definitions/StandardEventConfig_for_ConnectorSelector" - }, - "response": { - "description": "#/definitions/StandardEventConfig_for_ConnectorSelector", - "$ref": "#/definitions/StandardEventConfig_for_ConnectorSelector" - } - }, - "additionalProperties": { - "description": "#/definitions/Event_for_ConnectorAttributes_and_ConnectorSelector", - "$ref": "#/definitions/Event_for_ConnectorAttributes_and_ConnectorSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::instruments::ConnectorInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument": { - "type": "object", - "properties": { - "http.client.request.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector" - }, - "http.client.request.duration": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector" - }, - "http.client.response.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::connector::attributes::ConnectorAttributes_apollo_router::plugins::telemetry::config_new::connector::selectors::ConnectorSelector" - } - }, - "additionalProperties": { - "description": "#/definitions/Instrument_for_ConnectorAttributes_and_ConnectorSelector_and_ConnectorValue", - "$ref": "#/definitions/Instrument_for_ConnectorAttributes_and_ConnectorSelector_and_ConnectorValue" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::GraphQLInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument": { - "type": "object", - "properties": { - "field.execution": { - "description": 
"#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector" - }, - "list.length": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector" - } - }, - "additionalProperties": { - "description": "#/definitions/Instrument_for_GraphQLAttributes_and_GraphQLSelector_and_GraphQLValue", - "$ref": "#/definitions/Instrument_for_GraphQLAttributes_and_GraphQLSelector_and_GraphQLValue" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector": { - "type": "object", - "properties": { - "graphql.field.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.field.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.list.length": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.operation.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.type.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": { - "description": "#/definitions/GraphQLSelector", - "$ref": "#/definitions/GraphQLSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional": { - "description": "Common attributes for http server and client. 
See https://opentelemetry.io/docs/specs/semconv/http/http-spans/#common-attributes", - "type": "object", - "properties": { - "baggage": { - "description": "All key values from trace baggage.", - "default": null, - "type": "boolean", - "nullable": true - }, - "dd.trace_id": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "error.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.request.body.size": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.request.method": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.response.body.size": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.response.status_code": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.route": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.local.address": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.local.port": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.peer.address": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.peer.port": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.protocol.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.protocol.version": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.transport": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "server.address": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "server.port": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "trace_id": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "url.path": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "url.query": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "url.scheme": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "user_agent.original": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": { - "description": 
"#/definitions/conditional_attribute_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector", - "$ref": "#/definitions/conditional_attribute_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector": { - "description": "Common attributes for http server and client. See https://opentelemetry.io/docs/specs/semconv/http/http-spans/#common-attributes", - "type": "object", - "properties": { - "baggage": { - "description": "All key values from trace baggage.", - "default": null, - "type": "boolean", - "nullable": true - }, - "dd.trace_id": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "error.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.request.body.size": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.request.method": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.response.body.size": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.response.status_code": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "http.route": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.local.address": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.local.port": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.peer.address": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.peer.port": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.protocol.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.protocol.version": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.transport": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "network.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "server.address": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "server.port": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "trace_id": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "url.path": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "url.query": { - 
"description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "url.scheme": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "user_agent.original": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": { - "description": "#/definitions/RouterSelector", - "$ref": "#/definitions/RouterSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::router::events::RouterEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event": { - "type": "object", - "properties": { - "error": { - "description": "#/definitions/StandardEventConfig_for_RouterSelector", - "$ref": "#/definitions/StandardEventConfig_for_RouterSelector" - }, - "request": { - "description": "#/definitions/StandardEventConfig_for_RouterSelector", - "$ref": "#/definitions/StandardEventConfig_for_RouterSelector" - }, - "response": { - "description": "#/definitions/StandardEventConfig_for_RouterSelector", - "$ref": "#/definitions/StandardEventConfig_for_RouterSelector" - } - }, - "additionalProperties": { - "description": "#/definitions/Event_for_RouterAttributes_and_RouterSelector", - "$ref": "#/definitions/Event_for_RouterAttributes_and_RouterSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::router::instruments::RouterInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument": { - "type": "object", - "properties": { - "http.server.active_requests": { - "description": "#/definitions/DefaultedStandardInstrument_for_ActiveRequestsAttributes", - "$ref": "#/definitions/DefaultedStandardInstrument_for_ActiveRequestsAttributes" - }, - "http.server.request.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector" - }, - "http.server.request.duration": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector" - }, - "http.server.response.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::router::attributes::RouterAttributes_apollo_router::plugins::telemetry::config_new::router::selectors::RouterSelector" - } - }, - "additionalProperties": { - "description": 
"#/definitions/Instrument_for_RouterAttributes_and_RouterSelector_and_RouterValue", - "$ref": "#/definitions/Instrument_for_RouterAttributes_and_RouterSelector_and_RouterValue" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional": { - "type": "object", - "properties": { - "http.request.resend_count": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.graphql.document": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.graphql.operation.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.graphql.operation.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": { - "description": "#/definitions/conditional_attribute_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/conditional_attribute_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector": { - "type": "object", - "properties": { - "http.request.resend_count": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.graphql.document": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.graphql.operation.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.graphql.operation.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "subgraph.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": { - "description": "#/definitions/SubgraphSelector", - "$ref": "#/definitions/SubgraphSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::events::SubgraphEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event": { - "type": "object", - "properties": { - "error": { - "description": "#/definitions/StandardEventConfig_for_SubgraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SubgraphSelector" - }, - "request": { - "description": "#/definitions/StandardEventConfig_for_SubgraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SubgraphSelector" - }, - "response": { - "description": "#/definitions/StandardEventConfig_for_SubgraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SubgraphSelector" - } - }, - "additionalProperties": { - "description": "#/definitions/Event_for_SubgraphAttributes_and_SubgraphSelector", - "$ref": "#/definitions/Event_for_SubgraphAttributes_and_SubgraphSelector" - } - }, - 
"extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::instruments::SubgraphInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument": { - "type": "object", - "properties": { - "http.client.request.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - }, - "http.client.request.duration": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - }, - "http.client.response.body.size": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::subgraph::attributes::SubgraphAttributes_apollo_router::plugins::telemetry::config_new::subgraph::selectors::SubgraphSelector" - } - }, - "additionalProperties": { - "description": "#/definitions/Instrument_for_SubgraphAttributes_and_SubgraphSelector_and_SubgraphValue", - "$ref": "#/definitions/Instrument_for_SubgraphAttributes_and_SubgraphSelector_and_SubgraphValue" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::conditional::Conditional": { - "description": "Attributes for Cost", - "type": "object", - "properties": { - "cost.actual": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "cost.delta": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "cost.estimated": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "cost.result": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.document": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.operation.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.operation.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": { - "description": 
"#/definitions/conditional_attribute_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector", - "$ref": "#/definitions/conditional_attribute_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector": { - "description": "Attributes for Cost", - "type": "object", - "properties": { - "cost.actual": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "cost.delta": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "cost.estimated": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "cost.result": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.document": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.operation.name": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - }, - "graphql.operation.type": { - "description": "#/definitions/StandardAttribute", - "$ref": "#/definitions/StandardAttribute", - "nullable": true - } - }, - "additionalProperties": { - "description": "#/definitions/SupergraphSelector", - "$ref": "#/definitions/SupergraphSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::events::SupergraphEventsConfig_apollo_router::plugins::telemetry::config_new::events::Event": { - "type": "object", - "properties": { - "error": { - "description": "#/definitions/StandardEventConfig_for_SupergraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SupergraphSelector" - }, - "request": { - "description": "#/definitions/StandardEventConfig_for_SupergraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SupergraphSelector" - }, - "response": { - "description": "#/definitions/StandardEventConfig_for_SupergraphSelector", - "$ref": "#/definitions/StandardEventConfig_for_SupergraphSelector" - } - }, - "additionalProperties": { - "description": "#/definitions/Event_for_SupergraphAttributes_and_SupergraphSelector", - "$ref": "#/definitions/Event_for_SupergraphAttributes_and_SupergraphSelector" - } - }, - "extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::instruments::SupergraphInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument": { - "type": "object", - "properties": { - "cost.actual": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector" - }, - "cost.delta": { - "description": 
"#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector" - }, - "cost.estimated": { - "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector", - "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::supergraph::attributes::SupergraphAttributes_apollo_router::plugins::telemetry::config_new::supergraph::selectors::SupergraphSelector" - } - }, - "additionalProperties": { - "description": "#/definitions/Instrument_for_SupergraphAttributes_and_SupergraphSelector_and_SupergraphValue", - "$ref": "#/definitions/Instrument_for_SupergraphAttributes_and_SupergraphSelector_and_SupergraphValue" - } - }, - "logging_format": { - "oneOf": [ - { - "description": "Tracing subscriber https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/format/struct.Json.html", - "type": "object", - "required": [ - "json" - ], - "properties": { - "json": { - "type": "object", - "properties": { - "display_current_span": { - "description": "Include the current span in this log event.", - "default": false, - "type": "boolean" - }, - "display_filename": { - "description": "Include the filename with the log event.", - "default": false, - "type": "boolean" - }, - "display_level": { - "description": "Include the level with the log event. (default: true)", - "default": true, - "type": "boolean" - }, - "display_line_number": { - "description": "Include the line number with the log event.", - "default": false, - "type": "boolean" - }, - "display_resource": { - "description": "Include the resource with the log event. (default: true)", - "default": true, - "type": "boolean" - }, - "display_span_id": { - "description": "Include the span id (if any) with the log event. (default: true)", - "default": true, - "type": "boolean" - }, - "display_span_list": { - "description": "Include all of the containing span information with the log event. (default: true)", - "default": true, - "type": "boolean" - }, - "display_target": { - "description": "Include the target with the log event. (default: true)", - "default": true, - "type": "boolean" - }, - "display_thread_id": { - "description": "Include the thread_id with the log event.", - "default": false, - "type": "boolean" - }, - "display_thread_name": { - "description": "Include the thread_name with the log event.", - "default": false, - "type": "boolean" - }, - "display_timestamp": { - "description": "Include the timestamp with the log event. 
(default: true)", - "default": true, - "type": "boolean" - }, - "display_trace_id": { - "description": "#/definitions/DisplayTraceIdFormat", - "$ref": "#/definitions/DisplayTraceIdFormat" - }, - "span_attributes": { - "description": "List of span attributes to attach to the json log object", - "default": [], - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - { - "description": "Tracing subscriber https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/format/struct.Json.html", - "type": "string", - "enum": [ - "json" - ] - }, - { - "description": "Tracing subscriber https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/format/struct.Full.html", - "type": "object", - "required": [ - "text" - ], - "properties": { - "text": { - "type": "object", - "properties": { - "ansi_escape_codes": { - "description": "Process ansi escapes (default: true)", - "default": true, - "type": "boolean" - }, - "display_current_span": { - "description": "Include the current span in this log event. (default: true)", - "default": true, - "type": "boolean" - }, - "display_filename": { - "description": "Include the filename with the log event.", - "default": false, - "type": "boolean" - }, - "display_level": { - "description": "Include the level with the log event. (default: true)", - "default": true, - "type": "boolean" - }, - "display_line_number": { - "description": "Include the line number with the log event.", - "default": false, - "type": "boolean" - }, - "display_resource": { - "description": "Include the resource with the log event.", - "default": false, - "type": "boolean" - }, - "display_service_name": { - "description": "Include the service name with the log event.", - "default": false, - "type": "boolean" - }, - "display_service_namespace": { - "description": "Include the service namespace with the log event.", - "default": false, - "type": "boolean" - }, - "display_span_id": { - "description": "Include the span id (if any) with the log event. (default: false)", - "default": false, - "type": "boolean" - }, - "display_span_list": { - "description": "Include all of the containing span information with the log event. (default: true)", - "default": true, - "type": "boolean" - }, - "display_target": { - "description": "Include the target with the log event.", - "default": false, - "type": "boolean" - }, - "display_thread_id": { - "description": "Include the thread_id with the log event.", - "default": false, - "type": "boolean" - }, - "display_thread_name": { - "description": "Include the thread_name with the log event.", - "default": false, - "type": "boolean" - }, - "display_timestamp": { - "description": "Include the timestamp with the log event. 
(default: true)", - "default": true, - "type": "boolean" - }, - "display_trace_id": { - "description": "#/definitions/DisplayTraceIdFormat", - "$ref": "#/definitions/DisplayTraceIdFormat" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - { - "description": "Tracing subscriber https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/format/struct.Full.html", - "type": "string", - "enum": [ - "text" - ] - } - ] - } - } -} diff --git a/dev/dev.sh b/dev/dev.sh deleted file mode 100755 index 5757b02..0000000 --- a/dev/dev.sh +++ /dev/null @@ -1,18 +0,0 @@ -if [[ -z "${APOLLO_KEY}" ]]; then - echo "APOLLO_KEY is not set" - exit 1 -fi -if [[ -z "${APOLLO_GRAPH_REF}" ]]; then - echo "APOLLO_GRAPH_REF is not set" - exit 1 -fi - -if [ ! -f "./dev/router" ]; then - cd dev - curl -sSL https://router.apollo.dev/download/nix/v2.2.0 | sh - cd .. -fi - -rover supergraph compose --config ./dev/supergraph.yaml --output ./dev/supergraph.graphql - -./dev/router --config ./dev/router.yaml --supergraph ./dev/supergraph.graphql --dev diff --git a/dev/router.yaml b/dev/router.yaml deleted file mode 100644 index c94f915..0000000 --- a/dev/router.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# yaml-language-server: $schema=configuration_schema.json -supergraph: - listen: 127.0.0.1:4000 - -headers: - all: - request: - - propagate: - matching: .* - -authentication: - router: - jwt: - jwks: - - url: http://localhost:4008/.well-known/jwks.json - -authorization: - directives: - enabled: true - -cors: - allow_any_origin: true - -persisted_queries: - enabled: true - log_unknown: true - safelist: - enabled: false - require_id: false - -coprocessor: - url: http://localhost:8081 - timeout: 2s - router: - request: - headers: true # These boolean properties indicate which request data to include in the coprocessor request. All are optional and false by default. - subgraph: - all: - request: - headers: true - response: - headers: true diff --git a/dev/supergraph.yaml b/dev/supergraph.yaml deleted file mode 100644 index 18366d9..0000000 --- a/dev/supergraph.yaml +++ /dev/null @@ -1,35 +0,0 @@ -federation_version: 2 -subgraphs: - checkout: - routing_url: http://localhost:4001/ - schema: - file: ../subgraphs/checkout/schema.graphql - discovery: - routing_url: http://localhost:4002/ - schema: - file: ../subgraphs/discovery/schema.graphql - inventory: - routing_url: http://localhost:4003/ - schema: - file: ../subgraphs/inventory/schema.graphql - orders: - routing_url: http://localhost:4004/ - schema: - file: ../subgraphs/orders/schema.graphql - products: - routing_url: http://localhost:4005/ - schema: - file: ../subgraphs/products/schema.graphql - reviews: - routing_url: http://localhost:4006/ - schema: - file: ../subgraphs/reviews/schema.graphql - shipping: - routing_url: http://localhost:4007/ - schema: - file: ../subgraphs/shipping/schema.graphql - users: - routing_url: http://localhost:4008/ - schema: - file: ../subgraphs/users/schema.graphql - \ No newline at end of file diff --git a/package.json b/package.json index 0db684f..f97f7e7 100644 --- a/package.json +++ b/package.json @@ -9,7 +9,6 @@ "scripts": { "update-lockfiles": "./scripts/update-lockfiles.sh", "dev": "npm-run-all --parallel dev:*", - "dev:router": "./dev/dev.sh", "dev:checkout": "PORT=4001 npm run dev -w subgraphs/checkout", "dev:discovery": "PORT=4002 npm run dev -w subgraphs/discovery", "dev:inventory": "PORT=4003 npm run dev -w subgraphs/inventory",