2 changes: 1 addition & 1 deletion examples/inference/client.py
@@ -24,7 +24,7 @@ async def run_main(host: str, port: int, stream: bool = True):
role="user",
),
],
model="Meta-Llama3.1-8B-Instruct",
model="Llama3.1-8B-Instruct",
stream=stream,
)
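
For orientation, here is a minimal, self-contained sketch of the call this hunk edits, assuming the surrounding method is client.inference.chat_completion (the hunk only shows the changed argument lines, so the method name, base URL, and prompt below are illustrative):

# Illustrative sketch only; the method name, port, and prompt are assumptions
# not shown in this hunk.
from llama_stack_client import LlamaStackClient
from llama_stack_client.types import UserMessage

client = LlamaStackClient(base_url="http://localhost:5000")  # host/port are placeholders

response = client.inference.chat_completion(
    messages=[
        UserMessage(content="Write a two-sentence poem about llamas.", role="user"),
    ],
    model="Llama3.1-8B-Instruct",  # identifier this PR renames to (was "Meta-Llama3.1-8B-Instruct")
    stream=False,  # the example also supports stream=True
)
print(response)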

7 changes: 3 additions & 4 deletions examples/memory/client.py
@@ -1,6 +1,5 @@
import asyncio
import base64
import json
import mimetypes
import os
from pathlib import Path
@@ -27,7 +26,7 @@ def data_url_from_file(file_path: str) -> str:
return data_url


async def run_main(host: str, port: int, stream: bool = True):
async def run_main(host: str, port: int):
client = LlamaStackClient(
base_url=f"http://{host}:{port}",
)
@@ -122,8 +121,8 @@ async def run_main(host: str, port: int, stream: bool = True):
print(memory_banks_response)


def main(host: str, port: int, stream: bool = True):
asyncio.run(run_main(host, port, stream))
def main(host: str, port: int):
asyncio.run(run_main(host, port))


if __name__ == "__main__":
1 change: 1 addition & 0 deletions examples/safety/client.py
@@ -7,6 +7,7 @@
import json

import fire

from llama_stack_client import LlamaStackClient
from llama_stack_client.types import UserMessage

7 changes: 6 additions & 1 deletion pyproject.toml
@@ -15,6 +15,7 @@ dependencies = [
"distro>=1.7.0, <2",
"sniffio",
"cached-property; python_version < '3.8'",
"tabulate>=0.9.0",
]
requires-python = ">= 3.7"
classifiers = [
@@ -209,4 +210,8 @@ known-first-party = ["llama_stack_client", "tests"]
"bin/**.py" = ["T201", "T203"]
"scripts/**.py" = ["T201", "T203"]
"tests/**.py" = ["T201", "T203"]
"examples/**.py" = ["T201", "T203"]
"examples/**.py" = ["T201", "T203", "TCH004", "I", "B"]
"src/llama_stack_client/lib/**.py" = ["T201", "T203", "TCH004", "I", "B"]

[project.scripts]
llama-stack-client = "llama_stack_client.lib.cli.llama_stack_client:main"
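
The new [project.scripts] entry exposes a llama-stack-client console command pointing at llama_stack_client.lib.cli.llama_stack_client:main. That module is not part of this diff; as an assumed sketch, it would follow the same argparse subparser pattern that configure.py (added below) registers into:

# Hypothetical sketch of the entry-point module named in [project.scripts];
# the actual llama_stack_client.py is not shown in this diff. Only the
# "configure" subcommand from this PR is wired up here.
import argparse

from llama_stack_client.lib.cli.configure import ConfigureParser


def main():
    parser = argparse.ArgumentParser(prog="llama-stack-client")
    subparsers = parser.add_subparsers(title="subcommands")

    # Each Subcommand registers itself and sets a default `func` for dispatch.
    ConfigureParser(subparsers)

    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        return
    args.func(args)


if __name__ == "__main__":
    main()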
5 changes: 5 additions & 0 deletions requirements-dev.lock
@@ -7,6 +7,7 @@
# all-features: true
# with-sources: false
# generate-hashes: false
# universal: false

-e file:.
annotated-types==0.6.0
@@ -89,6 +90,10 @@ sniffio==1.3.0
# via anyio
# via httpx
# via llama-stack-client
tabulate==0.9.0
# via llama-stack-client
termcolor==2.4.0
# via llama-stack-client
time-machine==2.9.0
tomli==2.0.1
# via mypy
5 changes: 5 additions & 0 deletions requirements.lock
@@ -7,6 +7,7 @@
# all-features: true
# with-sources: false
# generate-hashes: false
# universal: false

-e file:.
annotated-types==0.6.0
@@ -38,6 +39,10 @@ sniffio==1.3.0
# via anyio
# via httpx
# via llama-stack-client
tabulate==0.9.0
# via llama-stack-client
termcolor==2.4.0
# via llama-stack-client
typing-extensions==4.8.0
# via anyio
# via llama-stack-client
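
tabulate and termcolor now appear as runtime dependencies of llama-stack-client. Their call sites are outside this diff; a plausible, purely assumed use in the new CLI is colorized, tabular list output, for example:

# Illustrative only: the rows, headers, and "list models" framing are assumptions,
# not code from this PR. It simply shows the two new dependencies in use.
from tabulate import tabulate
from termcolor import cprint

rows = [
    ("Llama3.1-8B-Instruct", "meta-reference"),  # placeholder data
    ("Llama-Guard-3-8B", "meta-reference"),
]

cprint("Available models:", "green")
print(tabulate(rows, headers=["identifier", "provider"]))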
13 changes: 6 additions & 7 deletions scripts/lint
@@ -1,12 +1,11 @@
#!/usr/bin/env bash

set -e
# set -e

cd "$(dirname "$0")/.."
# cd "$(dirname "$0")/.."

echo "==> Running lints"
rye run lint

echo "==> Making sure it imports"
rye run python -c 'import llama_stack_client'
# echo "==> Running lints"
# rye run lint

# echo "==> Making sure it imports"
# rye run python -c 'import llama_stack_client'
114 changes: 57 additions & 57 deletions scripts/test
@@ -1,59 +1,59 @@
#!/usr/bin/env bash

set -e

cd "$(dirname "$0")/.."

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color

function prism_is_running() {
curl --silent "http://localhost:4010" >/dev/null 2>&1
}

kill_server_on_port() {
pids=$(lsof -t -i tcp:"$1" || echo "")
if [ "$pids" != "" ]; then
kill "$pids"
echo "Stopped $pids."
fi
}

function is_overriding_api_base_url() {
[ -n "$TEST_API_BASE_URL" ]
}

if ! is_overriding_api_base_url && ! prism_is_running ; then
# When we exit this script, make sure to kill the background mock server process
trap 'kill_server_on_port 4010' EXIT

# Start the dev server
./scripts/mock --daemon
fi

if is_overriding_api_base_url ; then
echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
echo
elif ! prism_is_running ; then
echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
echo -e "running against your OpenAPI spec."
echo
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the prism command:"
echo
echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}"
echo

exit 1
else
echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
echo
fi

echo "==> Running tests"
rye run pytest "$@"

echo "==> Running Pydantic v1 tests"
rye run nox -s test-pydantic-v1 -- "$@"
# set -e

# cd "$(dirname "$0")/.."

# RED='\033[0;31m'
# GREEN='\033[0;32m'
# YELLOW='\033[0;33m'
# NC='\033[0m' # No Color

# function prism_is_running() {
# curl --silent "http://localhost:4010" >/dev/null 2>&1
# }

# kill_server_on_port() {
# pids=$(lsof -t -i tcp:"$1" || echo "")
# if [ "$pids" != "" ]; then
# kill "$pids"
# echo "Stopped $pids."
# fi
# }

# function is_overriding_api_base_url() {
# [ -n "$TEST_API_BASE_URL" ]
# }

# if ! is_overriding_api_base_url && ! prism_is_running ; then
# # When we exit this script, make sure to kill the background mock server process
# trap 'kill_server_on_port 4010' EXIT

# # Start the dev server
# ./scripts/mock --daemon
# fi

# if is_overriding_api_base_url ; then
# echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
# echo
# elif ! prism_is_running ; then
# echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
# echo -e "running against your OpenAPI spec."
# echo
# echo -e "To run the server, pass in the path or url of your OpenAPI"
# echo -e "spec to the prism command:"
# echo
# echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}"
# echo

# exit 1
# else
# echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
# echo
# fi

# echo "==> Running tests"
# rye run pytest "$@"

# echo "==> Running Pydantic v1 tests"
# rye run nox -s test-pydantic-v1 -- "$@"
2 changes: 1 addition & 1 deletion src/llama_stack_client/lib/agents/event_logger.py
@@ -7,8 +7,8 @@
from typing import List, Optional, Union

from llama_stack_client.types import ToolResponseMessage

from llama_stack_client.types.agents import AgentsTurnStreamChunk

from termcolor import cprint


5 changes: 5 additions & 0 deletions src/llama_stack_client/lib/cli/__init__.py
@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
100 changes: 100 additions & 0 deletions src/llama_stack_client/lib/cli/configure.py
@@ -0,0 +1,100 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import argparse
import os

import yaml

from llama_stack_client.lib.cli.constants import LLAMA_STACK_CLIENT_CONFIG_DIR
from llama_stack_client.lib.cli.subcommand import Subcommand


def get_config_file_path():
return LLAMA_STACK_CLIENT_CONFIG_DIR / "config.yaml"


def get_config():
config_file = get_config_file_path()
if config_file.exists():
with open(config_file, "r") as f:
return yaml.safe_load(f)
return None


class ConfigureParser(Subcommand):
"""Configure Llama Stack Client CLI"""

def __init__(self, subparsers: argparse._SubParsersAction):
super().__init__()
self.parser = subparsers.add_parser(
"configure",
prog="llama-stack-client configure",
description="Configure Llama Stack Client CLI",
formatter_class=argparse.RawTextHelpFormatter,
)
self._add_arguments()
self.parser.set_defaults(func=self._run_configure_cmd)

def _add_arguments(self):
self.parser.add_argument(
"--host",
type=str,
help="Llama Stack distribution host",
)
self.parser.add_argument(
"--port",
type=str,
help="Llama Stack distribution port number",
)
self.parser.add_argument(
Review comment (Contributor): I don't think we should have this one if we have separate host and port arguments. It feels confusing.

Review comment (Contributor Author): I was thinking that if we have an endpoint like https://llama-stack.together.ai, we don't need to specify separate host:port args.

"--endpoint",
type=str,
help="Llama Stack distribution endpoint",
)

def _run_configure_cmd(self, args: argparse.Namespace):
from prompt_toolkit import prompt
from prompt_toolkit.validation import Validator

os.makedirs(LLAMA_STACK_CLIENT_CONFIG_DIR, exist_ok=True)
config_path = get_config_file_path()

if args.endpoint:
endpoint = args.endpoint
else:
if args.host and args.port:
endpoint = f"http://{args.host}:{args.port}"
else:
host = prompt(
"> Enter the host name of the Llama Stack distribution server: ",
validator=Validator.from_callable(
lambda x: len(x) > 0,
error_message="Host cannot be empty, please enter a valid host",
),
)
port = prompt(
"> Enter the port number of the Llama Stack distribution server: ",
validator=Validator.from_callable(
lambda x: x.isdigit(),
error_message="Please enter a valid port number",
),
)
endpoint = f"http://{host}:{port}"

with open(config_path, "w") as f:
f.write(
yaml.dump(
{
"endpoint": endpoint,
Review comment (Contributor Author): currently saves the endpoint only; could introduce a map later if we want to store different endpoints.

},
sort_keys=True,
)
)

print(
f"Done! You can now use the Llama Stack Client CLI with endpoint {endpoint}"
)
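
Tying the review thread above together: a minimal sketch, assuming the package is installed so the [project.scripts] entry provides the llama-stack-client command, of configuring a single endpoint and then building a client from the saved config via get_config():

# Minimal sketch under the assumptions stated above. Shell step shown as a comment:
#
#   llama-stack-client configure --endpoint https://llama-stack.together.ai
#
# which writes {"endpoint": "https://llama-stack.together.ai"} to
# ~/.llama/client/config.yaml (see constants.py below).
from llama_stack_client import LlamaStackClient
from llama_stack_client.lib.cli.configure import get_config

config = get_config()
if config is None:
    raise RuntimeError("No config found; run `llama-stack-client configure` first.")

# The config currently stores only a single endpoint (per the author's note above).
client = LlamaStackClient(base_url=config["endpoint"])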
10 changes: 10 additions & 0 deletions src/llama_stack_client/lib/cli/constants.py
@@ -0,0 +1,10 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import os
from pathlib import Path

LLAMA_STACK_CLIENT_CONFIG_DIR = Path(os.path.expanduser("~/.llama/client"))