Skip to content

Commit 730828c

Browse files
committed
misc/py-litellm: add port: Call all LLM APIs using the OpenAI format
Call all LLM APIs using the OpenAI format [Bedrock, Huggingface, VertexAI, TogetherAI, Azure, OpenAI, etc.] LiteLLM manages: - Translate inputs to provider's completion, embedding, and image_generation endpoints - Consistent output, text responses will always be available at ['choices'][0]['message']['content'] - Retry/fallback logic across multiple deployments (e.g. Azure/OpenAI) - Router - Track spend & set budgets per project OpenAI Proxy Server WWW: https://github.com/BerriAI/litellm
1 parent e3dfc2f commit 730828c

File tree

5 files changed

+69
-0
lines changed

5 files changed

+69
-0
lines changed

misc/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -435,6 +435,7 @@
435435
SUBDIR += py-lazrs
436436
SUBDIR += py-lightgbm
437437
SUBDIR += py-lightning-utilities
438+
SUBDIR += py-litellm
438439
SUBDIR += py-log_symbols
439440
SUBDIR += py-mffpy
440441
SUBDIR += py-mmcv

misc/py-litellm/Makefile

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
PORTNAME= litellm
2+
DISTVERSION= 1.23.9
3+
CATEGORIES= misc python
4+
MASTER_SITES= PYPI
5+
PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
6+
7+
MAINTAINER= tagattie@FreeBSD.org
8+
COMMENT= Call all LLM APIs using the OpenAI format
9+
WWW= https://github.com/BerriAI/litellm
10+
11+
LICENSE= MIT
12+
LICENSE_FILE= ${WRKSRC}/LICENSE
13+
14+
BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}poetry-core>0:devel/py-poetry-core@${PY_FLAVOR} \
15+
${PYTHON_PKGNAMEPREFIX}wheel>0:devel/py-wheel@${PY_FLAVOR}
16+
RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}openai>=1.0.0:misc/py-openai@${PY_FLAVOR} \
17+
${PYTHON_PKGNAMEPREFIX}python-dotenv>=0.2.0:www/py-python-dotenv@${PY_FLAVOR} \
18+
${PYTHON_PKGNAMEPREFIX}tiktoken>=0.4.0:textproc/py-tiktoken@${PY_FLAVOR} \
19+
${PYTHON_PKGNAMEPREFIX}importlib-metadata>=6.8.0:devel/py-importlib-metadata@${PY_FLAVOR} \
20+
${PYTHON_PKGNAMEPREFIX}tokenizers>0:textproc/py-tokenizers@${PY_FLAVOR} \
21+
${PYTHON_PKGNAMEPREFIX}click>0:devel/py-click@${PY_FLAVOR} \
22+
${PYTHON_PKGNAMEPREFIX}Jinja2>=3.1.2<4.0.0:devel/py-Jinja2@${PY_FLAVOR} \
23+
${PYTHON_PKGNAMEPREFIX}aiohttp>0:www/py-aiohttp@${PY_FLAVOR} \
24+
${PYTHON_PKGNAMEPREFIX}requests>=2.31.0<3.0.0:www/py-requests@${PY_FLAVOR}
25+
26+
USES= python shebangfix
27+
USE_PYTHON= autoplist pep517
28+
29+
REINPLACE_ARGS= -i ''
30+
NO_ARCH= yes
31+
32+
PORTDOCS= README.md
33+
34+
OPTIONS_DEFINE= DOCS
35+
36+
post-patch:
37+
@${REINPLACE_CMD} -e 's|%%PYTHON_CMD%%|${PYTHON_CMD}|' \
38+
${WRKSRC}/litellm/proxy/start.sh
39+
@${FIND} ${WRKSRC}/litellm/proxy -type f \
40+
\( -name '*.orig' -o -name '*.bak' \) -delete
41+
42+
post-install-DOCS-on:
43+
@${MKDIR} ${STAGEDIR}${DOCSDIR}
44+
${INSTALL_MAN} ${PORTDOCS:S|^|${WRKSRC}/|} ${STAGEDIR}${DOCSDIR}
45+
46+
.include <bsd.port.mk>

misc/py-litellm/distinfo

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
TIMESTAMP = 1707722656
2+
SHA256 (litellm-1.23.9.tar.gz) = 0c1e0e56f4d1d9c8a33da09d6736bde9b21a8ea324db8c05cc3de65c6b4fad7d
3+
SIZE (litellm-1.23.9.tar.gz) = 3139242
Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
--- litellm/proxy/start.sh.orig 2024-02-11 03:13:21 UTC
2+
+++ litellm/proxy/start.sh
3+
@@ -1,2 +1,2 @@
4+
-#!/bin/bash
5+
-python3 proxy_cli.py
6+
\ No newline at end of file
7+
+#!/bin/sh
8+
+%%PYTHON_CMD%% proxy_cli.py

misc/py-litellm/pkg-descr

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
Call all LLM APIs using the OpenAI format [Bedrock, Huggingface,
2+
VertexAI, TogetherAI, Azure, OpenAI, etc.]
3+
4+
LiteLLM manages:
5+
- Translate inputs to provider's completion, embedding, and
6+
image_generation endpoints
7+
- Consistent output, text responses will always be available at
8+
['choices'][0]['message']['content']
9+
- Retry/fallback logic across multiple deployments (e.g. Azure/OpenAI)
10+
- Router
11+
- Track spend & set budgets per project
- OpenAI Proxy Server

0 commit comments

Comments (0)