Skip to content

Commit

Permalink
adding cli_config and client changes
Browse files Browse the repository at this point in the history
  • Loading branch information
KrishnaM251 committed Jun 18, 2024
2 parents 7aca339 + 09f2e41 commit 7977a1d
Show file tree
Hide file tree
Showing 79 changed files with 3,669 additions and 1,417 deletions.
1 change: 1 addition & 0 deletions .github/workflows/docker-integration-tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ jobs:
pipx install poetry==1.8.2
poetry install -E dev
poetry run pytest -s tests/test_client.py
poetry run pytest -s tests/test_concurrent_connections.py
- name: Print docker logs if tests fail
if: failure()
Expand Down
6 changes: 3 additions & 3 deletions .github/workflows/rdme-openapi.yml
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,9 @@ jobs:
- name: "Setup Python, Poetry and Dependencies"
uses: packetcoders/action-setup-cache-python-poetry@main
with:
python-version: "3.11"
poetry-version: "1.7.1"
install-args: "--all-extras"
python-version: "3.12"
poetry-version: "1.8.2"
install-args: "-E dev"

- name: Generate openapi.json file
run: |
Expand Down
28 changes: 28 additions & 0 deletions .github/workflows/test_local.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
---
# CI: exercise the locally-hosted embedding endpoint on every push/PR to main.
name: Endpoint (Local)

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      # Cached Python/Poetry setup; installs the dev and local extras.
      - name: "Setup Python, Poetry and Dependencies"
        uses: packetcoders/action-setup-cache-python-poetry@main
        with:
          python-version: "3.12"
          poetry-version: "1.8.2"
          install-args: "-E dev -E local"

      - name: Test embedding endpoint
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_embedding_endpoint_local
30 changes: 30 additions & 0 deletions .github/workflows/test_memgpt_hosted.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
---
# CI: exercise the MemGPT-hosted LLM and embedding endpoints on push/PR to main.
name: Endpoint (MemGPT)

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      # Cached Python/Poetry setup; dev extra only (hosted endpoints need no local backend).
      - name: "Setup Python, Poetry and Dependencies"
        uses: packetcoders/action-setup-cache-python-poetry@main
        with:
          python-version: "3.12"
          poetry-version: "1.8.2"
          install-args: "-E dev"

      - name: Test LLM endpoint
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_llm_endpoint_memgpt_hosted
      - name: Test embedding endpoint
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_embedding_endpoint_memgpt_hosted
38 changes: 38 additions & 0 deletions .github/workflows/test_ollama.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
---
# CI: stand up a local Ollama server, then exercise its LLM and embedding
# endpoints on every push/PR to main.
name: Endpoint (Ollama)

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      # Install Ollama, start it in the background, and pre-pull both models.
      - name: Start Ollama Server
        run: |
          curl -fsSL https://ollama.com/install.sh | sh
          ollama serve &
          sleep 10 # wait for server
          ollama pull dolphin2.2-mistral:7b-q6_K
          ollama pull mxbai-embed-large
      # Cached Python/Poetry setup; installs the dev and ollama extras.
      - name: "Setup Python, Poetry and Dependencies"
        uses: packetcoders/action-setup-cache-python-poetry@main
        with:
          python-version: "3.12"
          poetry-version: "1.8.2"
          install-args: "-E dev -E ollama"

      - name: Test LLM endpoint
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_llm_endpoint_ollama
      - name: Test embedding endpoint
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_embedding_endpoint_ollama
43 changes: 43 additions & 0 deletions .github/workflows/test_openai.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
---
# CI: exercise the OpenAI LLM and embedding endpoints on every push/PR to main.
# NOTE(review): OPENAI_API_KEY is set both at workflow level and per-step;
# the per-step entries are redundant with the top-level env but harmless.
name: Endpoint (OpenAI)

env:
  OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      # Cached Python/Poetry setup; dev extra only.
      - name: "Setup Python, Poetry and Dependencies"
        uses: packetcoders/action-setup-cache-python-poetry@main
        with:
          python-version: "3.12"
          poetry-version: "1.8.2"
          install-args: "-E dev"

      # Write credentials/config for the openai backend before running tests.
      - name: Initialize credentials
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          poetry run memgpt quickstart --backend openai
      - name: Test LLM endpoint
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_llm_endpoint_openai
      - name: Test embedding endpoint
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_embedding_endpoint_openai
25 changes: 10 additions & 15 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@ name: Run All pytest Tests

env:
MEMGPT_PGURI: ${{ secrets.MEMGPT_PGURI }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

on:
push:
Expand All @@ -14,30 +13,29 @@ jobs:
test:
runs-on: ubuntu-latest
timeout-minutes: 15

services:
qdrant:
image: qdrant/qdrant
ports:
- 6333:6333

steps:
- name: Checkout
uses: actions/checkout@v4

- name: Build and run container
run: bash db/run_postgres.sh


- name: "Setup Python, Poetry and Dependencies"
uses: packetcoders/action-setup-cache-python-poetry@main
with:
python-version: "3.12"
poetry-version: "1.8.2"
install-args: "--all-extras"
install-args: "-E dev -E postgres -E milvus"

- name: Initialize credentials
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
if [ -z "$OPENAI_API_KEY" ]; then
poetry run memgpt quickstart --backend openai
else
poetry run memgpt quickstart --backend memgpt
fi
run: poetry run memgpt quickstart --backend memgpt

#- name: Run docker compose server
# env:
Expand All @@ -56,7 +54,6 @@ jobs:
MEMGPT_PG_PASSWORD: memgpt
MEMGPT_PG_DB: memgpt
MEMGPT_PG_HOST: localhost
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MEMGPT_SERVER_PASS: test_server_token
run: |
poetry run pytest -s -vv tests/test_server.py
Expand All @@ -68,11 +65,10 @@ jobs:
MEMGPT_PG_PASSWORD: memgpt
MEMGPT_PG_HOST: localhost
MEMGPT_PG_DB: memgpt
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MEMGPT_SERVER_PASS: test_server_token
PYTHONPATH: ${{ github.workspace }}:${{ env.PYTHONPATH }}
run: |
poetry run pytest -s -vv -k "not test_storage and not test_server and not test_openai_client" tests
poetry run pytest -s -vv -k "not test_concurrent_connections.py and not test_quickstart and not test_endpoints and not test_storage and not test_server and not test_openai_client" tests
- name: Run storage tests
env:
Expand All @@ -81,7 +77,6 @@ jobs:
MEMGPT_PG_PASSWORD: memgpt
MEMGPT_PG_HOST: localhost
MEMGPT_PG_DB: memgpt
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MEMGPT_SERVER_PASS: test_server_token
run: |
poetry run pytest -s -vv tests/test_storage.py
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -1015,4 +1015,5 @@ pgdata/

## pytest mirrors
memgpt/.pytest_cache/
memgpy/pytest.ini
memgpy/pytest.ini
**/**/pytest_cache
8 changes: 4 additions & 4 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
# The builder image, used to build the virtual environment
FROM python:3.11-bookworm as builder
FROM python:3.12.2-bookworm as builder
ARG MEMGPT_ENVIRONMENT=PRODUCTION
ENV MEMGPT_ENVIRONMENT=${MEMGPT_ENVIRONMENT}
RUN pip install poetry==1.4.2
RUN pip install poetry==1.8.2

ENV POETRY_NO_INTERACTION=1 \
POETRY_VIRTUALENVS_IN_PROJECT=1 \
Expand All @@ -16,13 +16,13 @@ RUN poetry lock --no-update
RUN if [ "$MEMGPT_ENVIRONMENT" = "DEVELOPMENT" ] ; then \
poetry install --no-root -E "postgres server dev autogen" ; \
else \
poetry install --without dev --no-root -E "postgres server" && \
poetry install --no-root -E "postgres server" && \
rm -rf $POETRY_CACHE_DIR ; \
fi


# The runtime image, used to just run the code provided its virtual environment
FROM python:3.11-slim-bookworm as runtime
FROM python:3.12.2-slim-bookworm as runtime
ARG MEMGPT_ENVIRONMENT=PRODUCTION
ENV MEMGPT_ENVIRONMENT=${MEMGPT_ENVIRONMENT}
ENV VIRTUAL_ENV=/app/.venv \
Expand Down
22 changes: 11 additions & 11 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,18 +18,18 @@ MemGPT makes it easy to build and deploy stateful LLM agents with support for:
* Connections to [external data sources](https://memgpt.readme.io/docs/data_sources) (e.g. PDF files) for RAG
* Defining and calling [custom tools](https://memgpt.readme.io/docs/functions) (e.g. [google search](https://github.com/cpacker/MemGPT/blob/main/examples/google_search.py))

You can also use MemGPT to depoy agents as a *service*. You can use a MemGPT server to run a multi-user, multi-agent application on top of supported LLM providers.
You can also use MemGPT to deploy agents as a *service*. You can use a MemGPT server to run a multi-user, multi-agent application on top of supported LLM providers.

<img width="1000" alt="image" src="https://github.com/cpacker/MemGPT/assets/8505980/1096eb91-139a-4bc5-b908-fa585462da09">


## Installation & Setup
## Installation & Setup
Install MemGPT:
```sh
pip install -U pymemgpt
```

To use MemGPT with OpenAI, set the environment variable `OPENAI_API_KEY` to your OpenAI key then run:
To use MemGPT with OpenAI, set the environment variable `OPENAI_API_KEY` to your OpenAI key then run:
```
memgpt quickstart --backend openai
```
Expand All @@ -54,19 +54,20 @@ MemGPT provides a developer portal that enables you to easily create, edit, moni

<img width="1000" alt="image" src="https://github.com/cpacker/MemGPT/assets/5475622/071117c5-46a7-4953-bc9d-d74880e66258">

## Quickstart (Server)
## Quickstart (Server)

**Option 1 (Recommended)**: Run with docker compose
**Option 1 (Recommended)**: Run with docker compose
1. [Install docker on your system](https://docs.docker.com/get-docker/)
2. Clone the repo: `git clone https://github.com/cpacker/MemGPT.git`
3. Run `docker compose up`
4. Go to `memgpt.localhost` in the browser to view the developer portal
3. Copy-paste `.env.example` to `.env` and optionally modify
4. Run `docker compose up`
5. Go to `memgpt.localhost` in the browser to view the developer portal

**Option 2:** Run with the CLI:
1. Run `memgpt server`
2. Go to `localhost:8283` in the browser to view the developer portal

Once the server is running, you can use the [Python client](https://memgpt.readme.io/docs/admin-client) or [REST API](https://memgpt.readme.io/reference/api) to connect to `memgpt.localhost` (if you're running with docker compose) or `localhost:8283` (if you're running with the CLI) to create users, agents, and more. The service requires authentication with a MemGPT admin password, which can be set with running `export MEMGPT_SERVER_PASS=password`.
Once the server is running, you can use the [Python client](https://memgpt.readme.io/docs/admin-client) or [REST API](https://memgpt.readme.io/reference/api) to connect to `memgpt.localhost` (if you're running with docker compose) or `localhost:8283` (if you're running with the CLI) to create users, agents, and more. The service requires authentication with a MemGPT admin password; it is the value of `MEMGPT_SERVER_PASS` in `.env`.

## Supported Endpoints & Backends
MemGPT is designed to be model and provider agnostic. The following LLM and embedding endpoints are supported:
Expand All @@ -80,7 +81,7 @@ MemGPT is designed to be model and provider agnostic. The following LLM and embe
| Groq | ✅ (alpha release) ||
| Cohere API |||
| vLLM |||
| Ollama || |
| Ollama || |
| LM Studio |||
| koboldcpp |||
| oobabooga web UI |||
Expand All @@ -95,12 +96,11 @@ When using MemGPT with open LLMs (such as those downloaded from HuggingFace), th
* **Report Issues or Suggest Features**: Have an issue or a feature request? Please submit them through our [GitHub Issues page](https://github.com/cpacker/MemGPT/issues).
* **Explore the Roadmap**: Curious about future developments? View and comment on our [project roadmap](https://github.com/cpacker/MemGPT/issues/1200).
* **Benchmark the Performance**: Want to benchmark the performance of a model on MemGPT? Follow our [Benchmarking Guidance](#benchmarking-guidance).
* **Join Community Events**: Stay updated with the [MemGPT event calendar](https://lu.ma/berkeley-llm-meetup) or follow our [Twitter account](https://twitter.com/MemGPT).
* **Join Community Events**: Stay updated with the [MemGPT event calendar](https://lu.ma/berkeley-llm-meetup) or follow our [Twitter account](https://twitter.com/MemGPT).


## Benchmarking Guidance
To evaluate the performance of a model on MemGPT, simply configure the appropriate model settings using `memgpt configure`, and then initiate the benchmark via `memgpt benchmark`. The duration will vary depending on your hardware. This will run through a predefined set of prompts through multiple iterations to test the function calling capabilities of a model. You can help track what LLMs work well with MemGPT by contributing your benchmark results via [this form](https://forms.gle/XiBGKEEPFFLNSR348), which will be used to update the spreadsheet.

## Legal notices
By using MemGPT and related MemGPT services (such as the MemGPT endpoint or hosted service), you agree to our [privacy policy](https://github.com/cpacker/MemGPT/tree/main/PRIVACY.md) and [terms of service](https://github.com/cpacker/MemGPT/tree/main/TERMS.md).

7 changes: 7 additions & 0 deletions configs/embedding_model_configs/local.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"embedding_endpoint": null,
"embedding_model": "BAAI/bge-small-en-v1.5",
"embedding_dim": 384,
"embedding_chunk_size": 300,
"embedding_endpoint_type": "local"
}
3 changes: 2 additions & 1 deletion configs/embedding_model_configs/memgpt-hosted.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,6 @@
"embedding_endpoint": "https://embeddings.memgpt.ai",
"embedding_model": "BAAI/bge-large-en-v1.5",
"embedding_dim": 1024,
"embedding_chunk_size": 300
"embedding_chunk_size": 300,
"embedding_endpoint_type": "hugging-face"
}
7 changes: 7 additions & 0 deletions configs/embedding_model_configs/ollama.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"embedding_endpoint_type": "ollama",
"embedding_endpoint": "http://127.0.0.1:11434",
"embedding_model": "mxbai-embed-large",
"embedding_dim": 512,
"embedding_chunk_size": 200
}
6 changes: 6 additions & 0 deletions configs/llm_model_configs/ollama.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"context_window": 8192,
"model_endpoint_type": "ollama",
"model_endpoint": "http://127.0.0.1:11434",
"model": "dolphin2.2-mistral:7b-q6_K"
}
2 changes: 2 additions & 0 deletions dev-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ services:
build:
context: .
dockerfile: Dockerfile
target: runtime
depends_on:
- memgpt_db
ports:
Expand All @@ -35,6 +36,7 @@ services:
- MEMGPT_PG_HOST=pgvector_db
- MEMGPT_PG_PORT=5432
- OPENAI_API_KEY=${OPENAI_API_KEY}
- SERPAPI_API_KEY=${SERPAPI_API_KEY}
volumes:
- ./configs/server_config.yaml:/root/.memgpt/config # config file
# ~/.memgpt/credentials:/root/.memgpt/credentials # credentials file
Loading

0 comments on commit 7977a1d

Please sign in to comment.