diff --git a/README.md.jinja b/README.md.jinja
index 0e19422..01a44e9 100644
--- a/README.md.jinja
+++ b/README.md.jinja
@@ -29,9 +29,9 @@ The backend python workflow and frontend ui share values from this single file.
These apps are built on top of `llama_deploy`, which comes bundled with a `llamactl` cli for serving your workflows as an API, and your app, side by side.
-You can serve it locally with `uv run llamactl serve llama_deploy.local` from within this directory.
+You can serve it locally with `uvx llamactl serve` from within this directory.
-After starting with `llamactl`, visit `http://localhost:4501/deployments/{{package_name}}/ui` to see the UI.
+After starting with `llamactl`, visit `http://localhost:4501/deployments/{{ project_name }}/ui` to see the UI.
## Exporting types
diff --git a/llama_deploy.local.jinja b/llama_deploy.local.jinja
deleted file mode 100644
index 8f74274..0000000
--- a/llama_deploy.local.jinja
+++ /dev/null
@@ -1,25 +0,0 @@
-name: "{{project_name}}"
-
-control-plane:
-  port: 8000
-
-default-service: process-file
-
-services:
-  process-file:
-    name: Process File
-    env-files:
-      - .env
-    source:
-      type: local
-      name: .
-    path: src/{{project_name_snake}}.process_file:workflow
-
-
-ui:
-  name: "{{project_name}} UI"
-  env-files:
-    - .env
-  source:
-    type: local
-    name: ./ui
diff --git a/llama_deploy.yaml.jinja b/llama_deploy.yaml.jinja
index 42f796a..ecf1243 100644
--- a/llama_deploy.yaml.jinja
+++ b/llama_deploy.yaml.jinja
@@ -8,17 +8,11 @@ default-service: process-file
 services:
   process-file:
     name: Process File
-    source:
-      type: local
-      name: .
-      sync_policy: merge
     path: "src/{{project_name_snake}}.process_file:workflow"
-    python-dependencies:
-      - "."
+    env-files:
+      - ".env"
 
 ui:
   name: "{{project_name}} UI"
   source:
-    type: local
     name: ./ui
-    sync_policy: merge
diff --git a/test-proj/README.md b/test-proj/README.md
index 9ce2126..556e21f 100644
--- a/test-proj/README.md
+++ b/test-proj/README.md
@@ -29,9 +29,9 @@ The backend python workflow and frontend ui share values from this single file.
These apps are built on top of `llama_deploy`, which comes bundled with a `llamactl` cli for serving your workflows as an API, and your app, side by side.
-You can serve it locally with `uv run llamactl serve llama_deploy.local` from within this directory.
+You can serve it locally with `uvx llamactl serve` from within this directory.
-After starting with `llamactl`, visit `http://localhost:4501/deployments//ui` to see the UI.
+After starting with `llamactl`, visit `http://localhost:4501/deployments/test-proj/ui` to see the UI.
## Exporting types
diff --git a/test-proj/llama_deploy.local b/test-proj/llama_deploy.local
deleted file mode 100644
index 27f4a2c..0000000
--- a/test-proj/llama_deploy.local
+++ /dev/null
@@ -1,25 +0,0 @@
-name: "test-proj"
-
-control-plane:
-  port: 8000
-
-default-service: process-file
-
-services:
-  process-file:
-    name: Process File
-    env-files:
-      - .env
-    source:
-      type: local
-      name: .
-    path: src/test_proj.process_file:workflow
-
-
-ui:
-  name: "test-proj UI"
-  env-files:
-    - .env
-  source:
-    type: local
-    name: ./ui
diff --git a/test-proj/llama_deploy.yaml b/test-proj/llama_deploy.yaml
index 0e1bd0b..c19c4eb 100644
--- a/test-proj/llama_deploy.yaml
+++ b/test-proj/llama_deploy.yaml
@@ -8,17 +8,11 @@ default-service: process-file
 services:
   process-file:
     name: Process File
-    source:
-      type: local
-      name: .
-      sync_policy: merge
     path: "src/test_proj.process_file:workflow"
-    python-dependencies:
-      - "."
+    env-files:
+      - ".env"
 
 ui:
   name: "test-proj UI"
   source:
-    type: local
     name: ./ui
-    sync_policy: merge
diff --git a/test-proj/ui/src/lib/client.ts b/test-proj/ui/src/lib/client.ts
index f93560d..657def4 100644
--- a/test-proj/ui/src/lib/client.ts
+++ b/test-proj/ui/src/lib/client.ts
@@ -11,7 +11,7 @@ import { EXTRACTED_DATA_COLLECTION } from "./config";
 
 const platformToken = import.meta.env.VITE_LLAMA_CLOUD_API_KEY;
 const apiBaseUrl = import.meta.env.VITE_LLAMA_CLOUD_BASE_URL;
-const projectId = import.meta.env.VITE_LLAMA_CLOUD_PROJECT_ID;
+const projectId = import.meta.env.VITE_LLAMA_DEPLOY_PROJECT_ID;
 
 // Configure the platform client
 cloudApiClient.setConfig({
diff --git a/test-proj/ui/src/main.tsx b/test-proj/ui/src/main.tsx
index f65f820..b56bed7 100644
--- a/test-proj/ui/src/main.tsx
+++ b/test-proj/ui/src/main.tsx
@@ -1,17 +1,14 @@
 import { StrictMode } from "react";
 import { createRoot } from "react-dom/client";
-import { BrowserRouter } from "react-router-dom";
+import { HashRouter } from "react-router-dom";
 import App from "./App";
 import "@llamaindex/ui/styles.css";
 import "./index.css";
 
-// https://github.com/run-llama/llama_deploy/blob/main/llama_deploy/apiserver/deployment.py#L183
-const base = import.meta.env.VITE_LLAMA_DEPLOY_BASE_PATH ?? "/";
-
 createRoot(document.getElementById("root")!).render(
   <StrictMode>
-    <BrowserRouter basename={base}>
+    <HashRouter>
       <App />
-    </BrowserRouter>
+    </HashRouter>
   </StrictMode>,
 );
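Context for the router swap above: `HashRouter` keeps the active route in the URL fragment, so the same bundle works wherever llama_deploy mounts it, and the build-time base path that `BrowserRouter` needed via `basename` can be dropped. A minimal self-contained sketch of the pattern, assuming react-router-dom v6; the `Items` route is hypothetical and not part of the template:

```tsx
import { StrictMode } from "react";
import { createRoot } from "react-dom/client";
import { HashRouter, Route, Routes } from "react-router-dom";

// Hypothetical route component, for illustration only.
function Items() {
  return <p>items</p>;
}

// With HashRouter the route lives after "#", e.g.
// http://localhost:4501/deployments/test-proj/ui#/items,
// so no basename has to be injected at build time.
createRoot(document.getElementById("root")!).render(
  <StrictMode>
    <HashRouter>
      <Routes>
        <Route path="/items" element={<Items />} />
      </Routes>
    </HashRouter>
  </StrictMode>,
);
```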
diff --git a/test-proj/ui/vite.config.ts b/test-proj/ui/vite.config.ts
index 4414c38..43692ae 100644
--- a/test-proj/ui/vite.config.ts
+++ b/test-proj/ui/vite.config.ts
@@ -4,8 +4,8 @@ import path from "path";
 
 // https://vitejs.dev/config/
 export default defineConfig(({}) => {
-  const deploymentId = process.env.LLAMA_DEPLOY_NEXTJS_DEPLOYMENT_NAME;
-  const basePath = `/deployments/${deploymentId}/ui`;
+  const deploymentId = process.env.LLAMA_DEPLOY_DEPLOYMENT_URL_ID;
+  const basePath = process.env.LLAMA_DEPLOY_DEPLOYMENT_BASE_PATH;
   const projectId = process.env.LLAMA_DEPLOY_PROJECT_ID;
 
   return {
@@ -30,9 +30,9 @@ export default defineConfig(({}) => {
     define: {
       "import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_NAME":
         JSON.stringify(deploymentId),
-      "import.meta.env.VITE_LLAMA_DEPLOY_BASE_PATH": JSON.stringify(basePath),
+      "import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_BASE_PATH": JSON.stringify(basePath),
       ...(projectId && {
-        "import.meta.env.VITE_LLAMA_CLOUD_PROJECT_ID":
+        "import.meta.env.VITE_LLAMA_DEPLOY_PROJECT_ID":
           JSON.stringify(projectId),
       }),
     },
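The renamed defines above are compile-time constants that Vite substitutes into the bundle, so app code reads them through `import.meta.env`. A minimal sketch of a consumer, assuming only the variable names this diff introduces; the fallback values are illustrative, not part of the template:

```ts
// These identifiers are replaced textually by Vite's `define` at build
// time; the "??" fallbacks cover running the UI without llamactl.
const basePath =
  import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_BASE_PATH ?? "/";
const deploymentName =
  import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_NAME ?? "dev";

console.log(`deployment ${deploymentName} is served under ${basePath}`);
```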
diff --git a/ui/src/lib/client.ts b/ui/src/lib/client.ts
index f93560d..657def4 100644
--- a/ui/src/lib/client.ts
+++ b/ui/src/lib/client.ts
@@ -11,7 +11,7 @@ import { EXTRACTED_DATA_COLLECTION } from "./config";
 
 const platformToken = import.meta.env.VITE_LLAMA_CLOUD_API_KEY;
 const apiBaseUrl = import.meta.env.VITE_LLAMA_CLOUD_BASE_URL;
-const projectId = import.meta.env.VITE_LLAMA_CLOUD_PROJECT_ID;
+const projectId = import.meta.env.VITE_LLAMA_DEPLOY_PROJECT_ID;
 
 // Configure the platform client
 cloudApiClient.setConfig({
diff --git a/ui/src/main.tsx b/ui/src/main.tsx
index f65f820..b56bed7 100644
--- a/ui/src/main.tsx
+++ b/ui/src/main.tsx
@@ -1,17 +1,14 @@
 import { StrictMode } from "react";
 import { createRoot } from "react-dom/client";
-import { BrowserRouter } from "react-router-dom";
+import { HashRouter } from "react-router-dom";
 import App from "./App";
 import "@llamaindex/ui/styles.css";
 import "./index.css";
 
-// https://github.com/run-llama/llama_deploy/blob/main/llama_deploy/apiserver/deployment.py#L183
-const base = import.meta.env.VITE_LLAMA_DEPLOY_BASE_PATH ?? "/";
-
 createRoot(document.getElementById("root")!).render(
   <StrictMode>
-    <BrowserRouter basename={base}>
+    <HashRouter>
       <App />
-    </BrowserRouter>
+    </HashRouter>
   </StrictMode>,
 );
diff --git a/ui/vite.config.ts b/ui/vite.config.ts
index 4414c38..43692ae 100644
--- a/ui/vite.config.ts
+++ b/ui/vite.config.ts
@@ -4,8 +4,8 @@ import path from "path";
 
 // https://vitejs.dev/config/
 export default defineConfig(({}) => {
-  const deploymentId = process.env.LLAMA_DEPLOY_NEXTJS_DEPLOYMENT_NAME;
-  const basePath = `/deployments/${deploymentId}/ui`;
+  const deploymentId = process.env.LLAMA_DEPLOY_DEPLOYMENT_URL_ID;
+  const basePath = process.env.LLAMA_DEPLOY_DEPLOYMENT_BASE_PATH;
   const projectId = process.env.LLAMA_DEPLOY_PROJECT_ID;
 
   return {
@@ -30,9 +30,9 @@ export default defineConfig(({}) => {
     define: {
       "import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_NAME":
         JSON.stringify(deploymentId),
-      "import.meta.env.VITE_LLAMA_DEPLOY_BASE_PATH": JSON.stringify(basePath),
+      "import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_BASE_PATH": JSON.stringify(basePath),
       ...(projectId && {
-        "import.meta.env.VITE_LLAMA_CLOUD_PROJECT_ID":
+        "import.meta.env.VITE_LLAMA_DEPLOY_PROJECT_ID":
           JSON.stringify(projectId),
       }),
     },