From 1d47562b7a33bc0d365ed0641ea22a5b98a11f2e Mon Sep 17 00:00:00 2001 From: Cooper Maruyama Date: Thu, 30 Apr 2026 00:17:17 -0700 Subject: [PATCH 1/4] fix(docs): patch alchemy + use bundle:false to stop re-bundling worker.js MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes the regression where every `/docs/*` route on docs.stackpanel.com returns `500 Internal Server Error` while `/` and `/api/search` work. Root cause: alchemy@2.0.0-beta.20's `Cloudflare.Worker.prepareBundle` runs `.open-next/worker.js` through `cloudflareRolldown` even when `isExternal: true`. The re-bundle rewrites OpenNext's dynamic `import("./server-functions/default/handler.mjs")` so its wrapper resolver returns `undefined`, which then throws `TypeError: Cannot destructure property 'name' of '(intermediate value)'` inside `createGenericHandler`. Static routes survive because they're served by the ASSETS binding without ever entering the broken handler. Fix: a 2-edit patch to alchemy that adds `WorkerProps.bundle?: boolean` (default `true`). When `bundle: false`, `prepareBundle` short-circuits — reads `props.main` directly and returns a single-file `Bundle.BundleOutput` whose hash is `sha256(content)`. No rolldown step, byte-identical upload. This change applies the patch to our local alchemy install via bun's `patchedDependencies` and turns it on in apps/docs. Patch is the same one I just pushed to czxtm/alchemy-effect:main as the proposed upstream PR; once it ships in a beta release we can drop both the patch file and the `bundle: false` prop. bd: closes stackpanel-49t Other changes: - bun.lock cleanup: removes `bun2nix` (already removed from the workspace package.json in d82d13a8 but not from the lockfile). 
--- apps/docs/alchemy.run.ts | 17 +++++++ bun.lock | 7 ++- issues.jsonl | 4 +- package.json | 3 ++ patches/alchemy@2.0.0-beta.20.patch | 72 +++++++++++++++++++++++++++++ 5 files changed, 98 insertions(+), 5 deletions(-) create mode 100644 patches/alchemy@2.0.0-beta.20.patch diff --git a/apps/docs/alchemy.run.ts b/apps/docs/alchemy.run.ts index 3472e707..773014ff 100644 --- a/apps/docs/alchemy.run.ts +++ b/apps/docs/alchemy.run.ts @@ -47,6 +47,23 @@ const program = Effect.gen(function* () { // `isExternal: true` skips the wrapper so the bundle keeps OpenNext's own // entrypoint. isExternal: true, + // `isExternal: true` is not enough — alchemy's `prepareBundle` still runs + // `.open-next/worker.js` through `cloudflareRolldown`, which rewrites the + // dynamic `import("./server-functions/default/handler.mjs")` (and friends) + // in ways that make OpenNext's `resolveWrapper(...)` return `undefined` + // at request time. The deployed Worker then throws + // `TypeError: Cannot destructure property 'name' of '(intermediate value)'` + // inside `createGenericHandler`, and every dynamic Next route (`/docs/*`, + // …) returns 500. Static routes (`/`, `/api/search`) keep working because + // they don't reach the wrapper resolver. + // + // The `bundle: false` opt-out is added by patches/alchemy@2.0.0-beta.20.patch + // (a backport of the proposed upstream change at + // https://github.com/alchemy-run/alchemy-effect — the + // `feat(cloudflare/Worker): add bundle: false …` commit). It short- + // circuits `prepareBundle` to upload `props.main` byte-for-byte. Drop the + // patch + this prop once the upstream feature ships in alchemy. + bundle: false, // Mirror apps/docs/wrangler.jsonc — OpenNext serves its own routing so the // worker must run for missed asset paths, and we want the SPA-style // trailing-slash handling for static MDX routes. 
diff --git a/bun.lock b/bun.lock index 2c74f62e..7b2ae3b0 100644 --- a/bun.lock +++ b/bun.lock @@ -28,7 +28,6 @@ "@types/node": "^22.13.11", "@typescript/native-preview": "^7.0.0-dev.20260317.1", "alchemy": "2.0.0-beta.20", - "bun2nix": "^2.0.6", "cloudflare": "^5.2.0", "rolldown": "1.0.0-rc.13", "turbo": "^2.5.4", @@ -425,9 +424,6 @@ "sst": "^3.17.25", "superjson": "^2.2.6", }, - "devDependencies": { - "bun2nix": "latest", - }, }, "packages/proto": { "name": "@stackpanel/proto", @@ -546,6 +542,9 @@ }, }, }, + "patchedDependencies": { + "alchemy@2.0.0-beta.20": "patches/alchemy@2.0.0-beta.20.patch", + }, "overrides": { "@effect/platform-bun": "4.0.0-beta.48", "@effect/platform-node": "4.0.0-beta.48", diff --git a/issues.jsonl b/issues.jsonl index e3eeda0c..501cb5a0 100644 --- a/issues.jsonl +++ b/issues.jsonl @@ -1,3 +1,6 @@ +{"id":"stackpanel-8yl","title":"Bootstrap alchemy v2 Cloudflare state store (one-time interactive)","description":"With the alchemy@2 migration + working CF API token, main's production deploys now fail with:\n\n AuthError: State store not found for script alchemy-state-store. Deploy the state store first.\n at node_modules/alchemy/src/Cloudflare/StateStore/State.ts:101:17\n\nThis is a new blocker class, exposed only after the auth fix landed (previously masked by 401s from the under-scoped token). It's a deliberate guard in alchemy v2:\n\n // TODO(sam): do we want to support bootstrapping the state store from CI?\n // for now - just die here\n\nThe state store provisioning flow (in alchemy/src/Cloudflare/StateStore/State.ts):\n\n 1. Read profile credentials cache (~/.alchemy/\u003cprofile\u003e/cloudflare-state-store)\n → if present, use it.\n 2. Else, query Cloudflare for the alchemy-state-store worker.\n 3. If the worker exists → loginWithCloudflare() (works in CI; uses the API\n token to read the secrets-store auth token via an edge-preview probe).\n 4. If the worker does NOT exist AND CI=true → die with the error above.\n 5. 
If the worker does NOT exist AND CI=false → interactive prompt; deploys\n the state store + secrets store + auth token.\n\nThe recommended remediation is a one-time interactive bootstrap from a maintainer's devshell:\n\n cd /path/to/stackpanel\n nix develop --impure\n bunx alchemy deploy --stage staging --yes # or production\n # When prompted \"Cloudflare State Store not found. Do you want to deploy it?\" → y\n # alchemy creates:\n # - Cloudflare Worker: alchemy-state-store\n # - Cloudflare Secrets Store: \u003csingle per-account\u003e\n # - Auth token in the secrets store\n # After this, every CI deploy on every branch can use Cloudflare.state()\n # because step (3) above succeeds.\n\nThe CF API token already provisioned (cfut_A8wV…) has all the scopes needed (verified\nvia curl probe: workers/scripts read+write, workers/subdomain, kv/namespaces,\nzones/.../workers/routes, workers/domains).\n\nAcceptance:\n- One-time deploy of alchemy-state-store completed (verified via\n GET /accounts/:id/workers/scripts/alchemy-state-store returning 200).\n- A subsequent CI Deploy Web run on main against --stage production succeeds\n through the Cloudflare.state() initialization step.\n- README/AGENTS.md updated with the bootstrap procedure so future maintainers\n don't repeat this discovery.\n\nFollow-up (longer term): file an upstream alchemy issue to support CI\nbootstrapping (maybe via an explicit `bunx alchemy state-store deploy`\nsubcommand), so projects don't need a one-shot human interaction.\n\nRefs:\n- main HEAD: 0f95da6f (Deploy Web Run 25107110360 — failed with this error)\n- comment in packages/infra/src/lib/deploy.ts:140-141 already documents this\n expectation: \"deployed on first interactive use; CI relies on it existing\"","status":"closed","priority":0,"issue_type":"bug","owner":"me@cooperm.com","created_at":"2026-04-29T11:53:57Z","created_by":"Cooper Maruyama","updated_at":"2026-04-30T01:34:16Z","closed_at":"2026-04-30T01:34:16Z","close_reason":"State-store 
Worker provisioned manually; CI deploys can now use Cloudflare.state() (verified by user 2026-04-29)","dependencies":[{"issue_id":"stackpanel-8yl","depends_on_id":"stackpanel-04d","type":"discovered-from","created_at":"2026-04-29T04:53:57Z","created_by":"Cooper Maruyama","metadata":"{}"}],"dependency_count":0,"dependent_count":0,"comment_count":0} +{"id":"stackpanel-49t","title":"Restore .open-next/cache asset overlay once alchemy@2 supports AssetsProps.sources","description":"## Update 2026-04-29: deeper root cause + scope expansion\n\n**Original framing** (cache overlay missing) was wrong. After the auth issues unblocked (PR #15 + #17), the actual Docs deploy failure surfaced and it's a structural alchemy@2 + OpenNext bundling incompatibility, not a missing-cache thing. Symptoms:\n\n UnknownCloudflareError: Uncaught TypeError: Cannot destructure property\n 'name' of '(intermediate value)' as it is undefined.\n at worker.js:1:23445 in createGenericHandler\n\nThis happens during Cloudflare's Worker validation step (the platform spins up the script in an isolate to verify it parses + initializes). It rejects the upload before it ever serves a request, so the deploy fails hard.\n\n### Root cause\n\n`@opennextjs/aws/dist/core/createGenericHandler.js` calls:\n\n const { name, wrapper } = await resolveWrapper(override?.wrapper);\n\n`resolveWrapper` defaults to `import(\"../overrides/wrappers/aws-lambda.js\")`, with a comment that says \"This will be replaced by the bundler\" — meaning the OpenNext build pipeline expects its own bundler (esbuild via `@opennextjs/cloudflare`) to swap that import for the Cloudflare-specific wrapper at build time.\n\n`apps/docs/.open-next/worker.js` is produced by `bunx opennextjs-cloudflare build`. That file is correct on its own — it's the OpenNext-bundled Workers module that wrangler happily uploads (cf. 
`apps/docs/wrangler.jsonc` already in the tree).\n\n**alchemy@2's `Cloudflare.Worker`** then re-bundles `props.main` through `@distilled.cloud/cloudflare-rolldown-plugin` (see `node_modules/.bun/alchemy@2.0.0-beta.20+…/node_modules/alchemy/src/Cloudflare/Workers/Worker.ts:1231-1265` — `prepareBundle`). Even with `isExternal: true` (which IS a real flag: `Platform.ts:23-29`, and skips the Effect-wrapping wrapper script), `buildBundle()` still runs rolldown over the input. That re-bundle either:\n\n - drops the OpenNext `import(\"./open-next.config.mjs\")` dynamic import\n (so `handlerConfig` is undefined, then `override?.wrapper` is undefined,\n then `resolveWrapper(undefined)` walks the default branch and tries to\n import `../overrides/wrappers/aws-lambda.js` from a path that no longer\n exists in the rebuilt bundle); OR\n - tree-shakes / mis-resolves the swap that OpenNext's own bundler put in\n place at first build, so `m_1.default` ends up undefined.\n\nEither way, `await resolveWrapper(...)` returns `undefined` and the destructure on `{ name, wrapper }` throws.\n\n### Why this only surfaced now\n\nUntil today, every Docs deploy on every branch failed *before* the upload step:\n - Pre-PR #16: deploys used the old `alchemy-effect` API + 0.81-style Worker wrapping; the OpenNext-cache vendored overlay was in place.\n - PR #16 (alchemy@2 migration): introduced this regression but every deploy died with `Unauthorized` (under-scoped CF token) before reaching upload.\n - PR #17 + the cherry-pick on main fixed the token; main's deploys then died with `State store not found` (`stackpanel-8yl`).\n - PR #15's preview run was the first deploy in the alchemy@2 era to (a) succeed at auth and (b) bypass the state-store check (preview uses `localState()`). It's the first run ever to call PUT /workers/scripts; that's when this error surfaced.\n\nSo this is a NEW failure mode introduced by PR #16, not a pre-existing condition.\n\n### Severity\n\nBumping to P0. 
Without a fix, no Docs deploy can succeed — neither preview nor production. This is independent from `stackpanel-8yl` (state-store bootstrap) but stacks on top of it: even after the state store is provisioned, main's Docs deploy will fail here.\n\n### Workaround options (cheapest first)\n\n1. **Bypass alchemy for the Docs Worker upload, keep alchemy for asset upload + custom domain.**\n `apps/docs/alchemy.run.ts` already imports `@distilled.cloud/cloudflare/workers` (it uses `Workers.listDomains`/`putDomain`/`deleteDomain` for the custom-hostname binding). We can call `Workers.putScript` directly with the pre-built `.open-next/worker.js` content + the assets binding, skipping `Cloudflare.Worker` entirely. Pro: minimal moving parts; alchemy still owns the rest of the deploy. Con: foregoes alchemy's typing/lifecycle for the Worker itself; need to manually wire bindings.\n\n2. **Replace the Docs deploy with `bunx wrangler deploy` in `.github/workflows/deploy-docs.yaml`.**\n `apps/docs/wrangler.jsonc` is already current and correct (compatibility flags, `assets.directory`, `assets.binding`, `not_found_handling`, `html_handling`). Custom hostname can be set via `wrangler.jsonc#routes` or `bunx wrangler deployments create`. Pro: smallest code change, uses the supported toolchain. Con: docs deploys diverge from web/api (which still use alchemy@2).\n\n3. **File upstream and wait.**\n File an issue against alchemy@2 for \"support pre-bundled OpenNext-style Workers without re-bundling\". Probably the actual fix is a `props.bundle: false` opt-out or honoring `isExternal` more aggressively. Pro: long-term correct. 
Con: blocks Docs deploys indefinitely.\n\n### Acceptance\n\n- One Docs deploy reaches a healthy serving state on `docs.stackpanel.com` (production) and `docs.\u003cpr-N\u003e.stackpanel.com` (preview).\n- The chosen workaround is documented in `apps/docs/alchemy.run.ts` (or replaced by wrangler) so the next maintainer doesn't re-discover this.\n- A separate issue is filed if the fix is \"wait for upstream alchemy\".\n\n### Refs\n\n- alchemy@2 source: `node_modules/.bun/alchemy@2.0.0-beta.20+1f3167790a4c81b5/node_modules/alchemy/src/Cloudflare/Workers/Worker.ts:1231-1265`\n- OpenNext source: `node_modules/.bun/@opennextjs+aws@3.10.1+44325935d690066f/node_modules/@opennextjs/aws/dist/core/{createGenericHandler.js,resolve.js}`\n- Last green Docs deploy: `514265ca` (PR #16 branch tip, stage `pr-16` → localState, but pre-merge so it never re-bundled the .open-next output the same way main does).\n- Failed deploys after PR #16 merge: `66b3e57b`, `d97359fb` (auth-masked), `cf01361c`, `0f95da6f` (state-store-masked).\n- PR #15 head deploy (auth + state-store both clear): `7b62e2c2` — first run to surface the `createGenericHandler` error.\n\n### Remaining `stackpanel-49t` original scope (asset cache overlay)\n\nStill valid, but downgraded to \"follow-up after the deploy actually works\". When alchemy@2 grows back the `AssetsProps.sources` API, restore the `.open-next/cache` overlay per the comments in `apps/docs/alchemy.run.ts:55-66`.","notes":"\n---\n\n## 2026-04-29: Upstream patch ready\n\n**State-store** (`stackpanel-8yl`): user bootstrapped manually. 
Deploy Web re-run on main commit `0f95da6f` is now ✅ success — confirmed prod deploy path works once the state-store Worker exists.\n\n**Docs blocker (this issue)**: patch prepared on a fork branch of `alchemy-run/alchemy-effect`, ready for the user to open upstream.\n\n- Repo: `git@github.com:alchemy-run/alchemy-effect.git` (cloned at `~/git/darkmatter/alchemy-effect`).\n- Branch: `claude/cf-worker-bundle-false` (1 commit on top of `main`).\n- Patch file: `~/.cursor/worktrees/stackpanel/xrm9/scratch/alchemy-patches/0001-feat-cloudflare-Worker-add-bundle-false-to-upload-ma.patch`\n- Files touched:\n - `packages/alchemy/src/Cloudflare/Workers/Worker.ts` (+47/-0)\n - `packages/alchemy/test/Cloudflare/Workers/PrepareBundle.test.ts` (new, 108 lines)\n - `packages/alchemy/test/Cloudflare/Workers/preBundledWorker.mjs` (new, 15 lines, fixture)\n\n**Design**: adds `WorkerProps.bundle?: boolean` (default `true`). When `false`, `prepareBundle` reads `props.main` directly and returns a synthetic single-file `Bundle.BundleOutput` (no rolldown). This is purely additive — existing callers are unaffected.\n\n**Test**: integration test deploys the hand-written ESM fixture and asserts `worker.hash?.bundle === sha256(sourceBytes)`, which is only satisfiable when alchemy skipped the rolldown step. Also round-trips a request through the live Worker.\n\n**Typecheck**: `bunx tsc -b packages/alchemy/tsconfig.test.json` passes on the patched files. 
Tip-of-main has 4 pre-existing TS errors in `test/Sidecar/{TestClient,TestServer}.ts` unrelated to this patch (verified with patch stashed away).\n\n**To unblock stackpanel here**: once the upstream PR merges and a new alchemy beta is published, bump the `alchemy` catalog version, set `bundle: false` in `apps/docs/alchemy.run.ts`, and uncomment the `assets.sources` cache overlay (or migrate to whatever asset-cache API alchemy@2 settles on).","status":"open","priority":0,"issue_type":"feature","owner":"me@cooperm.com","created_at":"2026-04-29T09:17:00Z","created_by":"Cooper Maruyama","updated_at":"2026-04-30T01:53:00Z","dependencies":[{"issue_id":"stackpanel-49t","depends_on_id":"stackpanel-r7g","type":"discovered-from","created_at":"2026-04-29T02:17:00Z","created_by":"Cooper Maruyama","metadata":"{}"}],"comments":[{"id":"019dd909-e48a-7687-9a8f-1aac7f21fe0a","issue_id":"stackpanel-49t","author":"Cooper Maruyama","text":"Severity bump from regression-only to hard breakage. With current state, every Docs deploy on every branch (including main) fails Worker create with:\n\n UnknownCloudflareError: Uncaught TypeError: Cannot destructure property 'name' of '(intermediate value)' as it is undefined.\n at worker.js:1:23445 in createGenericHandler\n\nLatest reproductions:\n- main @ d97359fb (chore: rekey) — failed\n- main @ 66b3e57b (Merge PR #16 alchemy@2) — failed\n- claude/demo-via-project-swap @ 7b62e2c2 — failed (Run 25106523090)\n\nWeb deploys are unaffected and pass cleanly with the rotated CF token + regenerated embedded payload (see PR #15). Only Docs is broken, and the trace points at the Worker bundle itself, not at alchemy/CF auth — supporting Option B/C over A: even if upstream alchemy ships AssetsProps.sources, the createGenericHandler/DO-name destructure error is a separate failure to chase. 
Recommend bisecting the Docs Worker build (.open-next/worker.js) between the last-green Docs deploy and the alchemy@2 migration to find the exact regression site.","created_at":"2026-04-29T11:39:57Z"}],"dependency_count":0,"dependent_count":0,"comment_count":1} +{"id":"stackpanel-04d","title":"codegen: regenerate embedded SOPS payloads on .sops.yaml edit + rekey","description":"The runtime alchemy deploy reads from the codegen-emitted module\n\\`packages/gen/env/src/runtime/generated-payloads/_envs/deploy.ts\\` (and its\ncompanion \\`data/_envs/deploy.sops.json\\`), not from\n\\`.stack/secrets/vars/shared.sops.yaml\\` directly.\n\nToday, those embedded payloads only get regenerated when the devshell hook\nruns codegen. That means edits to the source SOPS YAML — including the\ncommon \\`chore: rekey\\` flow and ad-hoc \\`sops set\\` rotations — silently\ndrift from what production deploys actually use. We hit this in PR-15 and\nPR-17: the Cloudflare API token rotation went into the source YAML and was\neven merged to main, but the embedded payload at HEAD continued decrypting\nto the old, under-scoped token, so every CI deploy after the rotation kept\n401-ing on \\`Cloudflare.Worker\\` create.\n\nFix options to consider (pick one in design):\n\n- Make the SOPS edit path (custom \\`sops\\` wrapper / hook) auto-run codegen\n for any \\`packages/gen/env/data/**.sops.*\\` or \\`.stack/secrets/**.sops.*\\`\n edit, and refuse to commit if the embedded payloads are stale.\n- Add a pre-commit hook that re-runs codegen and stages the resulting\n embedded files (mirrors the existing oxlint/format hook surface).\n- Or move the runtime to read the source SOPS YAML directly at deploy time\n instead of an embedded snapshot — eliminates the drift class entirely\n but is a much bigger change.\n\nAcceptance:\n- After any edit to \\`.stack/secrets/vars/*.sops.yaml\\` or rekey, the\n embedded deploy/app payloads under \\`packages/gen/env/\\` are guaranteed\n to be regenerated before 
the change can land on main.\n- A regression test (or CI check) fails the build if the source-derived\n plaintext for any secret diverges from what the embedded payload\n decrypts to with the same recipients.","status":"open","priority":1,"issue_type":"bug","owner":"me@cooperm.com","created_at":"2026-04-29T11:35:28Z","created_by":"Cooper Maruyama","updated_at":"2026-04-29T11:35:28Z","dependencies":[{"issue_id":"stackpanel-04d","depends_on_id":"stackpanel-49t","type":"discovered-from","created_at":"2026-04-29T04:35:28Z","created_by":"Cooper Maruyama","metadata":"{}"}],"dependency_count":0,"dependent_count":0,"comment_count":0} {"id":"stackpanel-r7g","title":"Fix broken bun install on main: alchemy-effect catalog reference","description":"Five packages reference `alchemy-effect: catalog:` (apps/api, apps/docs, apps/web, packages/db, packages/infra) but the root package.json#workspaces.catalog has no alchemy-effect entry. Result: bun install --frozen-lockfile fails with 'alchemy-effect@catalog: failed to resolve' on a clean clone of main.\n\nReproduction:\n rm -rf node_modules\n bun install --frozen-lockfile\n # error: alchemy-effect@catalog: failed to resolve (x5)\n\nRoot cause: introduced in commit dda9c459 'refactor: replace AWS EC2 infra with Cloudflare Workers + Neon, add agenix module' — the dep was added to packages but the catalog entry was never added.\n\nFix: add \"alchemy-effect\": \"^0.12.0\" to root package.json#workspaces.catalog (bun.lock already resolves alchemy-effect@0.12.0).\n\nSurfaced while working on PR #15 — could not run vite to regen routeTree.gen.ts after route deletions, had to hand-edit. 
Worktree node_modules from before the regression still work, masking the issue locally.","status":"closed","priority":1,"issue_type":"bug","owner":"me@cooperm.com","created_at":"2026-04-29T08:31:32Z","created_by":"Cooper Maruyama","updated_at":"2026-04-29T09:16:42Z","closed_at":"2026-04-29T09:16:42Z","close_reason":"Fixed by PR #16 — migrated workspace to alchemy@2.0.0-beta.20 (alchemy-effect rebrand). bun install now resolves cleanly and lockfile regenerates without the catalog miss.","dependency_count":0,"dependent_count":0,"comment_count":0} {"id":"stackpanel-os2.8","title":"Add Hetzner provision regression test with ephemeral instances","description":"Add a reproducible regression test for stackpanel provision using ephemeral Hetzner Cloud instances created on-demand via the hcloud API. The token already exists in SOPS as hetzner_api_key. The implementation should add the hcloud CLI to the devshell, create a disposable-machine test script, load hetzner_api_key from SOPS into HCLOUD_TOKEN, inject a temporary machine via .stack/config.local.nix, run stackpanel provision against it, verify the resulting NixOS host, and always clean up the instance.","design":"Prefer a real end-to-end infrastructure regression test over mocks for the final provision path, but keep verification safe and deterministic where possible. 
Use existing shell smoke test patterns for script structure and use .stack/config.local.nix for the highest-priority temporary machine override.","acceptance_criteria":"- hcloud is available in the devshell\n- tests/provision-hetzner-e2e.sh provisions an ephemeral CX22 in fsn1 from Debian 12\n- The script exports HCLOUD_TOKEN from the SOPS key hetzner_api_key\n- The script injects machine config via .stack/config.local.nix and cleans up in a trap\n- Justfile exposes a command to run the regression test and a dry-run mode","status":"closed","priority":1,"issue_type":"task","assignee":"Cooper Maruyama","owner":"me@cooperm.com","created_at":"2026-03-29T08:03:28Z","created_by":"Cooper Maruyama","updated_at":"2026-03-29T08:13:29Z","closed_at":"2026-03-29T08:13:29Z","close_reason":"Implemented: added hcloud to devshell, tests/provision-hetzner-e2e.sh, and Justfile entries. Commit d54bdbc7.","labels":["deployment","hetzner","testing"],"dependencies":[{"issue_id":"stackpanel-os2.8","depends_on_id":"stackpanel-os2","type":"parent-child","created_at":"2026-03-29T01:03:27Z","created_by":"Cooper Maruyama","metadata":"{}"}],"dependency_count":0,"dependent_count":0,"comment_count":0} {"id":"stackpanel-foe.4","title":"P4: Green test matrix on stackpanel","description":"Real deploys (not dry-run) of {docs,web} x {colmena,nixos-rebuild,fly} on stackpanel infra. Ensure docs and web have Nix packages that build to deployable artifacts. Deploy to ovh-usw-1 (direct) for NixOS backends. Add nix flake check validation for deployment outputs. 
All 6 cells must go green.","status":"open","priority":1,"issue_type":"task","owner":"me@cooperm.com","created_at":"2026-03-28T20:39:32Z","created_by":"Cooper Maruyama","updated_at":"2026-03-28T20:39:32Z","dependencies":[{"issue_id":"stackpanel-foe.4","depends_on_id":"stackpanel-foe","type":"parent-child","created_at":"2026-03-28T13:39:32Z","created_by":"Cooper Maruyama","metadata":"{}"},{"issue_id":"stackpanel-foe.4","depends_on_id":"stackpanel-foe.2","type":"blocks","created_at":"2026-03-28T13:47:41Z","created_by":"Cooper Maruyama","metadata":"{}"},{"issue_id":"stackpanel-foe.4","depends_on_id":"stackpanel-foe.3","type":"blocks","created_at":"2026-03-28T13:47:42Z","created_by":"Cooper Maruyama","metadata":"{}"}],"dependency_count":2,"dependent_count":2,"comment_count":0} @@ -44,7 +47,6 @@ {"id":"stackpanel-os2.5","title":"Add stackpanel provision --new and config round-trip machine authoring","description":"apps/stackpanel-go/cmd/cli/provision.go handles provisioning for machines that already exist in config, but the provisioning design also calls for a --new workflow that can author a minimal machine entry and preserve Nix path literals for hardwareConfig/diskLayout updates. 
Add that machine-authoring path so new-machine setup is not a manual edit-before-provision step.","design":"Reuse the repo's existing config-writing/serialization patterns instead of inventing a new config mutator; add tagged path handling if necessary to preserve Nix path types.","acceptance_criteria":"- stackpanel provision --new \u003cname\u003e --host \u003ctarget\u003e creates a minimal machine entry in the canonical Stackpanel config\n- hardwareConfig and diskLayout paths round-trip as Nix path literals instead of quoted absolute strings\n- The provision flow can update the new machine entry after generating hardware config\n- Add tests for config edit / serialization behavior","status":"closed","priority":2,"issue_type":"task","owner":"me@cooperm.com","created_at":"2026-03-28T15:02:37Z","created_by":"Cooper Maruyama","updated_at":"2026-03-28T20:19:21Z","closed_at":"2026-03-28T20:19:21Z","close_reason":"Dropped: manual config editing is acceptable, provision --new deferred indefinitely","external_ref":"https://linear.app/darkmatterlabs/issue/ENG-382","labels":["deployment"],"dependencies":[{"issue_id":"stackpanel-os2.5","depends_on_id":"stackpanel-os2","type":"parent-child","created_at":"2026-03-28T08:02:36Z","created_by":"Cooper Maruyama","metadata":"{}"},{"issue_id":"stackpanel-os2.5","depends_on_id":"stackpanel-os2.1","type":"blocks","created_at":"2026-03-28T08:02:40Z","created_by":"Cooper Maruyama","metadata":"{}"}],"dependency_count":1,"dependent_count":2,"comment_count":0} {"id":"stackpanel-os2.6","title":"Wire deploy/provision state into the Studio Deploy panel","description":"apps/web/src/components/studio/panels/deploy/deploy-panel.tsx is still Colmena-centric and does not appear to consume the CLI state tracked in .stack/state/deployments.json and .stack/state/machines.json. 
Update the Studio deploy experience so it reflects the same deploy/provision model and status that the CLI writes.","design":"Expose deploy/provision state through the agent/web API rather than teaching the browser to read local state files directly.","acceptance_criteria":"- The Deploy panel shows machine provisioning state and last deploy state from the supported agent/CLI APIs\n- Users can trigger deploy/provision actions from the panel with clear loading, success, and error states\n- Unsupported or partially configured backends degrade gracefully in the UI\n- Add frontend or integration coverage for the key panel states","status":"closed","priority":2,"issue_type":"task","owner":"me@cooperm.com","created_at":"2026-03-28T15:02:37Z","created_by":"Cooper Maruyama","updated_at":"2026-03-28T20:19:29Z","closed_at":"2026-03-28T20:19:29Z","close_reason":"Superseded by pluggable-deploy-backends restructure. Work absorbed into new phase-based tasks. See openspec/changes/pluggable-deploy-backends/","external_ref":"https://linear.app/darkmatterlabs/issue/ENG-383","labels":["deployment"],"dependencies":[{"issue_id":"stackpanel-os2.6","depends_on_id":"stackpanel-os2","type":"parent-child","created_at":"2026-03-28T08:02:37Z","created_by":"Cooper Maruyama","metadata":"{}"},{"issue_id":"stackpanel-os2.6","depends_on_id":"stackpanel-os2.3","type":"blocks","created_at":"2026-03-28T08:02:40Z","created_by":"Cooper Maruyama","metadata":"{}"},{"issue_id":"stackpanel-os2.6","depends_on_id":"stackpanel-os2.4","type":"blocks","created_at":"2026-03-28T08:02:41Z","created_by":"Cooper Maruyama","metadata":"{}"},{"issue_id":"stackpanel-os2.6","depends_on_id":"stackpanel-os2.5","type":"blocks","created_at":"2026-03-28T08:02:41Z","created_by":"Cooper Maruyama","metadata":"{}"}],"dependency_count":3,"dependent_count":1,"comment_count":0} {"id":"stackpanel-zhq","title":"Remove now-obsolete actions/cache@v4 of apps/{web,docs}/.alchemy from deploy workflows","description":"In the alchemy-effect → 
alchemy@2 migration (PR #16), all 5 deploy stacks switched from filesystem-based LocalState to Cloudflare-hosted state via Cloudflare.state(). The .alchemy/state/ directory is no longer used at deploy time.\n\nThe deploy workflows still cache it as a no-op:\n\n .github/workflows/deploy-web.yaml — Restore alchemy state (actions/cache@v4 on apps/web/.alchemy)\n .github/workflows/deploy-docs.yaml — Restore alchemy state (actions/cache@v4 on apps/docs/.alchemy)\n destroy job — actions/cache/restore@v4 of the same paths\n destroy job — Delete cached alchemy state (gh cache delete) cleanup\n\nPlus the explanatory comment block above each cache step describing the LocalState pattern is now misleading.\n\nCleanup:\n- Drop the cache@v4 + cache/restore@v4 steps from both workflows\n- Drop the gh cache delete cleanup step in the destroy jobs\n- Update or remove the now-misleading 'Persist alchemy's LocalState' comment blocks\n- Verify deploy still works without the cache (the Cloudflare state store is the new source of truth and is self-bootstrapping per Cloudflare.state())\n\nShould land after Cloudflare.state() is verified working in CI (depends on stackpanel-r7g / PR #16).","status":"open","priority":3,"issue_type":"chore","owner":"me@cooperm.com","created_at":"2026-04-29T09:17:13Z","created_by":"Cooper Maruyama","updated_at":"2026-04-29T09:17:13Z","dependencies":[{"issue_id":"stackpanel-zhq","depends_on_id":"stackpanel-r7g","type":"discovered-from","created_at":"2026-04-29T02:17:13Z","created_by":"Cooper Maruyama","metadata":"{}"}],"dependency_count":0,"dependent_count":0,"comment_count":0} -{"id":"stackpanel-49t","title":"Restore .open-next/cache asset overlay once alchemy@2 supports AssetsProps.sources","description":"In the alchemy-effect → alchemy@2 migration (PR #16), we deleted the vendored OpenNext asset overlay (vendor/alchemy-effect-opennext-overlay/, scripts/apply-alchemy-effect-opennext-assets.ts, root postinstall hook) because:\n\n1. 
It was tied to alchemy-effect@0.12.x's file structure and is incompatible with v2's restructured Worker.ts/Assets.ts\n2. The script's own self-disable path explicitly instructs maintainers to delete the hook when no alchemy-effect@0.12.x installs are found\n\napps/docs/alchemy.run.ts had its assets.sources field commented out with a TODO referencing this issue:\n\n assets: {\n directory: '.open-next/assets',\n // TODO(stackpanel): re-enable the .open-next/cache overlay once\n // alchemy@2 natively supports AssetsProps.sources …\n config: { … },\n }\n\nImpact: OpenNext incremental cache misses for cdn-cgi/_next_cache paths fall back to ISR revalidation. Cache hit-rate regression, not a hard breakage.\n\nscripts/ALCHEMY_EFFECT_OPENNEXT_UPSTREAM.md (also deleted) tracked the upstream PR for AssetsProps.sources support — verify whether it landed in alchemy@2's main branch and is just pending a release, or whether it needs to be re-pitched.\n\nResolution path:\n- Option A: wait for upstream alchemy to ship native AssetsProps.sources, then uncomment the field in apps/docs/alchemy.run.ts\n- Option B: re-vendor a v2-compatible overlay (risky — alchemy@2 has restructured Worker.ts/Assets.ts internals)\n- Option C: switch to a different OpenNext cache strategy that doesn't require the overlay\n\nAcceptance: docs deploy serves cdn-cgi/_next_cache assets from Workers Assets directly (Option A or B), or this issue is closed as won't-fix with a documented alternative.","status":"open","priority":3,"issue_type":"feature","owner":"me@cooperm.com","created_at":"2026-04-29T09:17:00Z","created_by":"Cooper Maruyama","updated_at":"2026-04-29T09:17:00Z","dependencies":[{"issue_id":"stackpanel-49t","depends_on_id":"stackpanel-r7g","type":"discovered-from","created_at":"2026-04-29T02:17:00Z","created_by":"Cooper Maruyama","metadata":"{}"}],"dependency_count":0,"dependent_count":0,"comment_count":0} {"id":"stackpanel-3vi","title":"Docs: module author guide + marketplace 
policies","description":"Docs that make it obvious how to build, test, price, and publish a module — plus the policies that keep the marketplace trustworthy.\n\n## Scope\n\n### Author guide (apps/docs/content/docs/modules/)\n- 'Build your first module' — scaffolding, module.nix structure, meta.nix fields, ui.nix if applicable\n- 'Test a module locally' — stackpanel link (local dev), running against sample .stack/config.nix\n- 'Package for publication' — tarball layout, signing, manifest requirements\n- 'Price and publish' — free vs paid tradeoffs, pricing UX tips\n- 'Get paid' — Polar Connect onboarding, tax docs, payout schedule\n- 'Versioning + updates' — semver discipline, deprecation policy\n\n### Policies\n- Acceptable use: no crypto miners, no telemetry without disclosure, no license keys hardcoded\n- Refund policy: 14-day no-questions-asked (author can opt into stricter)\n- Takedown policy: security issues → emergency delist within 24h\n- Revenue share + fee structure (the 15% sticker, transparent)\n- Intellectual property: developer retains ownership, grants distribution license","acceptance_criteria":"- Author guide builds with apps/docs\n- Policies are linked from dev portal's publish flow\n- Sample module repo referenced from the 'first module' page","status":"open","priority":3,"issue_type":"task","owner":"me@cooperm.com","created_at":"2026-04-24T03:45:46Z","created_by":"Cooper Maruyama","updated_at":"2026-04-24T03:45:46Z","dependencies":[{"issue_id":"stackpanel-3vi","depends_on_id":"stackpanel-02c","type":"blocks","created_at":"2026-04-23T20:46:16Z","created_by":"Cooper Maruyama","metadata":"{}"},{"issue_id":"stackpanel-3vi","depends_on_id":"stackpanel-c7t","type":"blocks","created_at":"2026-04-23T20:46:15Z","created_by":"Cooper Maruyama","metadata":"{}"},{"issue_id":"stackpanel-3vi","depends_on_id":"stackpanel-w3r","type":"blocks","created_at":"2026-04-23T20:46:17Z","created_by":"Cooper 
Maruyama","metadata":"{}"}],"dependency_count":3,"dependent_count":1,"comment_count":0} {"id":"stackpanel-l1q","title":"Module review workflow + automated Nix static analysis","description":"Prevent malicious or broken modules from reaching users. MVP manual, Phase 2 automated.\n\n## Scope\n\n### MVP: manual review\n- Admin tool (packages/api route + studio admin panel) showing pending listings\n- Reviewer sees: uploaded tarball contents, diff from previous version (if any), links to GitHub repo, automated scan results\n- Approve → listing goes live; Reject → listing status updated with reason visible to author\n- SLA target: 3 business days for initial review\n\n### Phase 2: automated scans\n- Static-analysis pass over module.nix + meta.nix:\n - Flag: import-from-derivation without explicit opt-in\n - Flag: builtins.fetchurl with non-allowlisted host\n - Flag: arbitrary path reads outside module dir\n - Flag: network calls during eval\n- Feed findings into review UI; author sees them pre-submit\n- Optionally: automatic 'verified pure' badge for modules with zero findings\n\n## Why not AI review\n\nPattern-match is more reliable for this than an LLM for the boring 'did they try to phone home during eval' checks. 
LLM review can come later for README/security claims.","acceptance_criteria":"- Reviewer can approve/reject pending listings\n- Rejected listings show reason to author with re-submit path\n- Static analysis surfaces known-bad patterns in a handful of test cases","status":"open","priority":3,"issue_type":"task","owner":"me@cooperm.com","created_at":"2026-04-24T03:45:37Z","created_by":"Cooper Maruyama","updated_at":"2026-04-24T03:45:37Z","dependencies":[{"issue_id":"stackpanel-l1q","depends_on_id":"stackpanel-c7t","type":"blocks","created_at":"2026-04-23T20:46:15Z","created_by":"Cooper Maruyama","metadata":"{}"}],"dependency_count":1,"dependent_count":1,"comment_count":0} {"id":"stackpanel-02c","title":"Developer payout: Polar Connect + KYC onboarding","description":"Pay developers their accrued balance via Polar Connect (Stripe Connect underneath), with KYC + tax form collection at onboarding.\n\n## Scope\n\n- Onboarding flow: first time creating a paid listing → prompt to connect Polar Connect account (redirect OAuth flow)\n- Collect tax info (W-9 US / W-8BEN international) via Polar's Connect UI\n- Payout job (scheduled): once per month, for each developer with balance \u003e= $50, trigger Polar payout; record payout_event(developer_id, amount_cents, polar_transfer_id, status)\n- Emails: onboarding done, first sale, monthly statement\n- Admin tool for manual payout holds (fraud, chargeback disputes)\n\n## Phase 1 fallback\n\nIf Polar Connect isn't ready: accumulate balances, issue manual Wise transfers quarterly while we collect via email. 
Works for ~20 developers, not for scale.","acceptance_criteria":"- Developer can connect payout account end-to-end\n- Monthly payout runs successfully against test Polar env\n- Balance decrements match transferred amount\n- Tax forms captured before first payout","status":"open","priority":3,"issue_type":"task","owner":"me@cooperm.com","created_at":"2026-04-24T03:45:28Z","created_by":"Cooper Maruyama","updated_at":"2026-04-24T03:45:28Z","dependencies":[{"issue_id":"stackpanel-02c","depends_on_id":"stackpanel-24e","type":"blocks","created_at":"2026-04-23T20:46:13Z","created_by":"Cooper Maruyama","metadata":"{}"},{"issue_id":"stackpanel-02c","depends_on_id":"stackpanel-c7t","type":"blocks","created_at":"2026-04-23T20:46:14Z","created_by":"Cooper Maruyama","metadata":"{}"}],"dependency_count":2,"dependent_count":2,"comment_count":0} diff --git a/package.json b/package.json index 672bf1b2..8b5970e1 100644 --- a/package.json +++ b/package.json @@ -110,5 +110,8 @@ "@effect/platform-node": "4.0.0-beta.48", "@effect/platform-node-shared": "4.0.0-beta.48", "@effect/platform-bun": "4.0.0-beta.48" + }, + "patchedDependencies": { + "alchemy@2.0.0-beta.20": "patches/alchemy@2.0.0-beta.20.patch" } } diff --git a/patches/alchemy@2.0.0-beta.20.patch b/patches/alchemy@2.0.0-beta.20.patch new file mode 100644 index 00000000..c4ab0da8 --- /dev/null +++ b/patches/alchemy@2.0.0-beta.20.patch @@ -0,0 +1,72 @@ +diff --git a/src/Cloudflare/Workers/Worker.ts b/src/Cloudflare/Workers/Worker.ts +index 66a75caf7255e6e1d6bcbf0a2cb28ba3c4c865e5..d48bbc07c7742cfb1fc40da3036d57067dbda524 100644 +--- a/src/Cloudflare/Workers/Worker.ts ++++ b/src/Cloudflare/Workers/Worker.ts +@@ -31,6 +31,7 @@ import { + import { hashDirectory, type MemoOptions } from "../../Build/Memo.ts"; + import * as Bundle from "../../Bundle/Bundle.ts"; + import { findCwdForBundle } from "../../Bundle/TempRoot.ts"; ++import { sha256 } from "../../Util/sha256.ts"; + import type { ScopedPlanStatusSession } from 
"../../Cli/Cli.ts"; + import { isResolved } from "../../Diff.ts"; + import type { HttpEffect } from "../../Http.ts"; +@@ -284,6 +285,30 @@ export interface WorkerProps< + */ + pure?: Bundle.BundleExtraOptions["pure"]; + }; ++ /** ++ * When `false`, skip alchemy's rolldown step and upload `main` to ++ * Cloudflare byte-for-byte. ++ * ++ * Use this when `main` already points at a complete, runtime-ready ++ * Workers ESM bundle produced by an external tool (for example, ++ * OpenNext, wrangler, or a custom build pipeline) and the bundle ++ * must not be re-processed by rolldown. ++ * ++ * Re-bundling such artifacts is unsafe: the dynamic `import()` calls ++ * the upstream tool relies on can be rewritten in ways that break ++ * runtime behavior (a common symptom is OpenNext failing inside ++ * `createGenericHandler` when its wrapper resolver returns ++ * `undefined`). ++ * ++ * `bundle: false` is intended for use with the ++ * `yield* Cloudflare.Worker("id", { main, bundle: false })` form, ++ * where {@link PlatformProps.isExternal | `isExternal: true`} is ++ * already inferred automatically. It can also be combined with an ++ * explicit `isExternal: true` if needed. ++ * ++ * @default true ++ */ ++ bundle?: boolean; + } + + export type Worker = Resource< +@@ -1234,6 +1259,28 @@ export const WorkerProvider = () => + const cwd = yield* findCwdForBundle(main); + const { compatibilityDate, compatibilityFlags } = + getCompatibility(props); ++ ++ // bundle: false → upload `main` to Cloudflare as-is. ++ // ++ // Pre-bundled artifacts (OpenNext, wrangler output, etc.) are ++ // intentionally complete; running them through rolldown a second ++ // time can rewrite dynamic imports in ways that break the ++ // upstream tool's runtime resolution. 
++ if (props.bundle === false) { ++ const content = yield* fs.readFile(main); ++ const hash = yield* sha256(content); ++ return { ++ files: [ ++ { ++ path: path.basename(main), ++ content, ++ hash, ++ }, ++ ], ++ hash, ++ } satisfies Bundle.BundleOutput; ++ } ++ + const buildBundle = (plugins?: rolldown.RolldownPluginOption) => + Bundle.build( + { From 7d65a03309eaa4575985b8aa23b677611aec3be4 Mon Sep 17 00:00:00 2001 From: Cooper Maruyama Date: Thu, 30 Apr 2026 00:32:05 -0700 Subject: [PATCH 2/4] fix(docs): pre-bundle worker.js with wrangler before alchemy upload MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow-up to the previous commit. The `bundle: false` opt-out was the right opt-out, but I had a wrong mental model of `.open-next/worker.js`: it's a ~2KB OpenNext entrypoint that *expects* to be passed through a wrangler-style bundler that resolves the relative `./cloudflare/*.js` imports and inlines them. Uploading it byte-for-byte fails with `Uncaught Error: No such module "cloudflare/images.js" — imported from "worker.js"`. The PR #20 first attempt confirmed the patch *itself* works (CI got past the rolldown step and uploaded 2.22 KB to Cloudflare); just the file content was incomplete. So: - `bun run build:worker` now also runs `wrangler deploy --dry-run --outdir=.open-next/dist`, which produces a self-contained `.open-next/dist/worker.js` (~10 MiB gzipped — under CF's limit). Wrangler bundles statics and *preserves* the runtime `import()` calls the way OpenNext expects. - `apps/docs/alchemy.run.ts` points `main:` at `.open-next/dist/worker.js` and keeps `bundle: false` so alchemy uploads the wrangler artifact byte-for-byte instead of running rolldown again. Net effect: the same `bunx alchemy deploy` that was 500-ing on every `/docs/*` route should now serve the real fumadocs UI. 
--- apps/docs/alchemy.run.ts | 38 ++++++++++++++++++++++++++------------ apps/docs/package.json | 2 +- 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/apps/docs/alchemy.run.ts b/apps/docs/alchemy.run.ts index 773014ff..f26e8389 100644 --- a/apps/docs/alchemy.run.ts +++ b/apps/docs/alchemy.run.ts @@ -40,29 +40,43 @@ const program = Effect.gen(function* () { // The build is expected to have already run (`bun run build:worker`); this // resource only handles upload + binding wiring. const website = yield* Cloudflare.Worker("Docs", { - main: ".open-next/worker.js", + // `.open-next/worker.js` is OpenNext's tiny ~2KB entrypoint — it expects to + // be passed through a wrangler-style bundler that resolves the relative + // `./cloudflare/*.js` imports and inlines them. Two viable bundlers: + // + // 1. wrangler (esbuild under the hood) — bundles statics, *preserves* + // runtime `import()` paths. This is what `opennextjs-cloudflare deploy` + // uses internally and what OpenNext is designed against. + // 2. alchemy's built-in cloudflareRolldown — also bundles statics, but + // mangles OpenNext's dynamic `import("./server-functions/default/ + // handler.mjs")` so its `resolveWrapper(...)` returns `undefined` at + // request time. The deployed Worker then throws + // `TypeError: Cannot destructure property 'name' of '(intermediate + // value)'` + // inside `createGenericHandler` and every dynamic Next route + // (`/docs/*`, …) returns 500. Static routes (`/`, `/api/search`) + // survive because they're served by the ASSETS binding without + // entering the broken handler. + // + // We pre-bundle with wrangler in `bun run build:worker` + // (`wrangler deploy --dry-run --outdir=.open-next/dist`) and point + // `main:` at the resulting self-contained file, then tell alchemy to skip + // its own bundling pass with `bundle: false` so the byte-for-byte upload + // is the wrangler artifact. 
+ main: ".open-next/dist/worker.js", // OpenNext emits a plain Workers default export `{ fetch }` — the alchemy // bootstrap that wraps `main` in `Layer.effect(tag, entry)` mis-handles // that shape and the deployed worker throws CF 1101 on first request. // `isExternal: true` skips the wrapper so the bundle keeps OpenNext's own // entrypoint. isExternal: true, - // `isExternal: true` is not enough — alchemy's `prepareBundle` still runs - // `.open-next/worker.js` through `cloudflareRolldown`, which rewrites the - // dynamic `import("./server-functions/default/handler.mjs")` (and friends) - // in ways that make OpenNext's `resolveWrapper(...)` return `undefined` - // at request time. The deployed Worker then throws - // `TypeError: Cannot destructure property 'name' of '(intermediate value)'` - // inside `createGenericHandler`, and every dynamic Next route (`/docs/*`, - // …) returns 500. Static routes (`/`, `/api/search`) keep working because - // they don't reach the wrapper resolver. - // // The `bundle: false` opt-out is added by patches/alchemy@2.0.0-beta.20.patch // (a backport of the proposed upstream change at // https://github.com/alchemy-run/alchemy-effect — the // `feat(cloudflare/Worker): add bundle: false …` commit). It short- // circuits `prepareBundle` to upload `props.main` byte-for-byte. Drop the - // patch + this prop once the upstream feature ships in alchemy. + // patch + this prop once cloudflareRolldown's dynamic-import handling is + // fixed upstream and we can bundle through alchemy directly. 
bundle: false, // Mirror apps/docs/wrangler.jsonc — OpenNext serves its own routing so the // worker must run for missed asset paths, and we want the SPA-style diff --git a/apps/docs/package.json b/apps/docs/package.json index 6acdafcb..eb3f46ae 100644 --- a/apps/docs/package.json +++ b/apps/docs/package.json @@ -50,7 +50,7 @@ "private": true, "scripts": { "build": "next build", - "build:worker": "opennextjs-cloudflare build", + "build:worker": "opennextjs-cloudflare build && wrangler deploy --dry-run --outdir=.open-next/dist", "cf-typegen": "wrangler types --env-interface CloudflareEnv cloudflare-env.d.ts", "deploy": "bun run build:worker && bunx alchemy deploy", "deploy:dev": "APP_ENV=dev bun run build:worker && bunx alchemy deploy --stage dev", From f36c0942c6ec87127181d58766b42f8657dbed58 Mon Sep 17 00:00:00 2001 From: Cooper Maruyama Date: Thu, 30 Apr 2026 00:36:14 -0700 Subject: [PATCH 3/4] fix(docs): minify wrangler dry-run output to fit 64 MiB Worker limit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Without --minify, the bundled worker.js was 70.1 MiB uncompressed — just over Cloudflare's 64 MiB uncompressed Workers script limit (the gzipped 10 MiB limit was fine). With --minify, 57.7 MiB uncompressed / 9.1 MiB gzipped. 
--- apps/docs/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/docs/package.json b/apps/docs/package.json index eb3f46ae..0c004703 100644 --- a/apps/docs/package.json +++ b/apps/docs/package.json @@ -50,7 +50,7 @@ "private": true, "scripts": { "build": "next build", - "build:worker": "opennextjs-cloudflare build && wrangler deploy --dry-run --outdir=.open-next/dist", + "build:worker": "opennextjs-cloudflare build && wrangler deploy --dry-run --outdir=.open-next/dist --minify", "cf-typegen": "wrangler types --env-interface CloudflareEnv cloudflare-env.d.ts", "deploy": "bun run build:worker && bunx alchemy deploy", "deploy:dev": "APP_ENV=dev bun run build:worker && bunx alchemy deploy --stage dev", From 92987d42716e89ff718d0ea2582af3583a640f9b Mon Sep 17 00:00:00 2001 From: Cooper Maruyama Date: Thu, 30 Apr 2026 00:59:58 -0700 Subject: [PATCH 4/4] fix(docs): remove fumadocs-typescript runtime registration (real /docs/* fix) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is the actual cause of every `/docs/*` route returning 500 — not the alchemy bundling bug from the previous commits, which was masking the symptom by failing the deploy outright. Once the worker actually deployed, runtime traffic surfaced: GET https://docs.pr-20.stackpanel.com/docs - Ok @ 12:55:10 (error) Error: [unenv] fs.mkdir is not implemented yet! 
at l6 (worker.js:112349:269) at worker.js:112349:8151 That stack lands on `ts-morph`'s `FileSystemDocumentCache.mkdir`, pulled in transitively via: src/mdx-components.tsx → fumadocs-typescript (createGenerator + AutoTypeTable UI) → fumadocs-typescript/ui (AutoTypeTable React component) → @ts-morph/common + ts-morph (TypeScript compiler API + virtual FS) The `<AutoTypeTable>` MDX tag doesn't actually need the runtime component — `remarkAutoTypeTable` in source.config.ts already expands those tags into plain markdown tables at build time, so the rendered output never contains a live `<AutoTypeTable>` element. (Verified: `rg AutoTypeTable apps/docs/content` returns no matches.) The runtime registration was dead code. Removing it: - drops ts-morph (~25 MiB) from the runtime worker bundle - drops the request-time `fs.mkdir` call that crashed every dynamic fumadocs page render - shrinks the bundled `worker.js` from 57.7 MiB → 31.0 MiB uncompressed and 9.0 MiB → 4.0 MiB gzipped (well under both Workers limits) If we ever need a *runtime* AutoTypeTable in the future, the cache must be a non-FS implementation (or omitted entirely) so it works under Workers' `nodejs_compat` polyfill. 
--- apps/docs/src/mdx-components.tsx | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/apps/docs/src/mdx-components.tsx b/apps/docs/src/mdx-components.tsx index aa755faf..46522ade 100644 --- a/apps/docs/src/mdx-components.tsx +++ b/apps/docs/src/mdx-components.tsx @@ -1,13 +1,20 @@ import defaultMdxComponents from "fumadocs-ui/mdx"; import type { MDXComponents } from "mdx/types"; -import { - createGenerator, - createFileSystemGeneratorCache, -} from "fumadocs-typescript"; -import { AutoTypeTable } from "fumadocs-typescript/ui"; import { Files, File, Folder } from "@/components/files"; import type { ReactNode } from "react"; +// Note on `<AutoTypeTable>`: the MDX tag is *expanded at build time* by +// `remarkAutoTypeTable` in source.config.ts, so the rendered docs never +// contain a live `<AutoTypeTable>` element. Registering the runtime +// component here used to pull `fumadocs-typescript` → `ts-morph` into +// the Cloudflare Worker bundle, which (a) bloats it past the 64 MiB +// uncompressed Workers limit and (b) crashes at request time with +// `[unenv] fs.mkdir is not implemented yet!` from `ts-morph`'s +// `createFileSystemGeneratorCache`. Don't reintroduce a runtime +// `AutoTypeTable` registration unless you're prepared to (1) swap to a +// non-FS cache and (2) verify ts-morph still bundles cleanly under +// `nodejs_compat`. + // NixOption renders stale generated MDX files that still reference <NixOption>. // It reconstructs the original markdown-table layout from the JSX props so the // options reference pages look correct until docs are regenerated. @@ -132,17 +139,9 @@ function NixOptionMeta() { return null; } -const generator = createGenerator({ - // recommended: choose a directory for cache - cache: createFileSystemGeneratorCache(".next/fumadocs-typescript"), -}); - export function getMDXComponents(components?: MDXComponents): MDXComponents { return { ...defaultMdxComponents, - AutoTypeTable: (props) => ( - <AutoTypeTable {...props} generator={generator} /> - ), Files, File, Folder,