From ac8b02a788d8075375e93fa69c2aca6c1ac5576e Mon Sep 17 00:00:00 2001 From: Guy_Leibu Date: Sun, 16 Nov 2025 13:09:18 +0200 Subject: [PATCH 01/19] First commit OpenAI Module --- modules/src/openai_proxy/openai.py | 181 +++++++++++++++++++++++++++++ 1 file changed, 181 insertions(+) create mode 100644 modules/src/openai_proxy/openai.py diff --git a/modules/src/openai_proxy/openai.py b/modules/src/openai_proxy/openai.py new file mode 100644 index 00000000..6a6ec1c5 --- /dev/null +++ b/modules/src/openai_proxy/openai.py @@ -0,0 +1,181 @@ +# openai_proxy/openai.py + +import os +import json +from urllib.parse import urljoin +from typing import Any, Dict, List, Optional + +import requests +from fastapi import FastAPI, Request, Response, Body + +app = FastAPI( + title="OpenAI Proxy App", + description="Local FastAPI proxy for OpenAI style endpoints", + version="1.0.0", +) + +OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.openai.com").rstrip("/") +OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "") +OPENAI_DEFAULT_MODEL = os.getenv("OPENAI_DEFAULT_MODEL", "gpt-4o-mini") + + +def build_headers(incoming: dict) -> dict: + headers = {} + auth = incoming.get("authorization") or incoming.get("Authorization") + if auth: + headers["Authorization"] = auth + elif OPENAI_API_KEY: + headers["Authorization"] = f"Bearer {OPENAI_API_KEY}" + ctype = incoming.get("content-type") or incoming.get("Content-Type") or "application/json" + headers["Content-Type"] = ctype + return headers + + +def build_target(path: str) -> str: + base = OPENAI_BASE_URL + if base.endswith("/v1") or base.endswith("/v1/"): + base = base[:-3] if base.endswith("/v1") else base[:-4] + return urljoin(base + "/", path.lstrip("/")) + + +def forward_json(path: str, body: dict, headers: dict, query: dict): + target = build_target(path) + resp = requests.post( + target, + headers=headers, + params=query, + json=body, + timeout=60, + ) + return resp + +@app.get("/") +def health(): + return {"status": "ok"} + + +# relaxed chat endpoint, accepts any JSON that includes messages +@app.post("/v1/chat/completions") +async def chat_completions( + request: Request, + payload: Dict[str, Any] = Body(...), +): + if "messages" not in payload or not isinstance(payload["messages"], list): + return Response( + content=json.dumps({"error": "messages must be a list of chat messages"}), + status_code=400, + media_type="application/json", + ) + + if "model" not in payload or payload["model"] is None: + payload["model"] = OPENAI_DEFAULT_MODEL + + headers = build_headers(dict(request.headers)) + resp = forward_json("/v1/chat/completions", payload, headers, dict(request.query_params)) + return Response( + content=resp.content, + status_code=resp.status_code, + media_type=resp.headers.get("Content-Type", "application/json"), + ) + + +@app.post("/v1/embeddings") +async def embeddings( + request: Request, + payload: Dict[str, Any] = Body(...), +): + if "model" not in payload or not payload["model"]: + payload["model"] = "text-embedding-3-small" + headers = build_headers(dict(request.headers)) + resp = forward_json("/v1/embeddings", payload, headers, dict(request.query_params)) + return Response( + content=resp.content, + status_code=resp.status_code, + media_type=resp.headers.get("Content-Type", "application/json"), + ) + + +@app.post("/v1/responses") +async def responses_api( + request: Request, + payload: Dict[str, Any] = Body(...), +): + if "model" not in payload or payload["model"] is None: + payload["model"] = OPENAI_DEFAULT_MODEL + headers = 
build_headers(dict(request.headers)) + resp = forward_json("/v1/responses", payload, headers, dict(request.query_params)) + return Response( + content=resp.content, + status_code=resp.status_code, + media_type=resp.headers.get("Content-Type", "application/json"), + ) + + +# ---------------- client ---------------- +class OpenAIProxyClient: + """ + Simple client for the local proxy. + Default base url is http://localhost:8000 + If api_key is not provided, it uses OPENAI_API_KEY from environment. + """ + + def __init__(self, base_url: str = "http://localhost:8000", api_key: Optional[str] = None): + self.base_url = base_url.rstrip("/") + self.api_key = api_key + + def _headers(self) -> Dict[str, str]: + headers = {"Content-Type": "application/json"} + key = self.api_key or os.getenv("OPENAI_API_KEY", "") + if key: + headers["Authorization"] = f"Bearer {key}" + return headers + + def chat(self, messages: List[Dict[str, str]], model: Optional[str] = None) -> Dict[str, Any]: + body: Dict[str, Any] = {"messages": messages} + if model: + body["model"] = model + resp = requests.post( + f"{self.base_url}/v1/chat/completions", + headers=self._headers(), + json=body, + timeout=60, + ) + resp.raise_for_status() + return resp.json() + + def embeddings(self, text: Any, model: Optional[str] = None) -> Dict[str, Any]: + body: Dict[str, Any] = {"input": text} + if model: + body["model"] = model + resp = requests.post( + f"{self.base_url}/v1/embeddings", + headers=self._headers(), + json=body, + timeout=60, + ) + resp.raise_for_status() + return resp.json() + + def responses(self, input_text: Any, model: Optional[str] = None) -> Dict[str, Any]: + body: Dict[str, Any] = {"input": input_text} + if model: + body["model"] = model + resp = requests.post( + f"{self.base_url}/v1/responses", + headers=self._headers(), + json=body, + timeout=60, + ) + resp.raise_for_status() + return resp.json() + + +# optional quick self test when running this file directly +if __name__ == "__main__": + # start the server in another terminal first: + # uvicorn openai_proxy.openai:app --host 0.0.0.0 --port 8000 --reload + c = OpenAIProxyClient() + try: + print("Health:", requests.get(f"{c.base_url}/").json()) + except Exception as e: + print("Server not running:", e) From 15409ed504d96b064e8923bd8fe9696f5c49c7fb Mon Sep 17 00:00:00 2001 From: Guy_Leibu Date: Sun, 16 Nov 2025 13:09:36 +0200 Subject: [PATCH 02/19] First commit OpenAI Module --- modules/src/openai_proxy/item.yaml | 17 +++++++++++++++++ modules/src/openai_proxy/requirements.txt | 4 ++++ 2 files changed, 21 insertions(+) create mode 100644 modules/src/openai_proxy/item.yaml create mode 100644 modules/src/openai_proxy/requirements.txt diff --git a/modules/src/openai_proxy/item.yaml b/modules/src/openai_proxy/item.yaml new file mode 100644 index 00000000..c7ce3a72 --- /dev/null +++ b/modules/src/openai_proxy/item.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +categories: +- general +description: OpenAI application runtime based on fastapi +example: openai_example.ipynb +generationDate: 2025-11-11:12-25 +hidden: false +labels: + author: Iguazio +mlrunVersion: 1.10.0 +name: openai +spec: + filename: openai_app.py + image: mlrun/mlrun + kind: general + requirements: +version: 1.0.0 \ No newline at end of file diff --git a/modules/src/openai_proxy/requirements.txt b/modules/src/openai_proxy/requirements.txt new file mode 100644 index 00000000..773b3f09 --- /dev/null +++ b/modules/src/openai_proxy/requirements.txt @@ -0,0 +1,4 @@ +fastapi>=0.110,<1.0 +uvicorn[standard]>=0.29,<1.0 
+gunicorn>=21.2,<22.0 +requests>=2.31,<3.0 \ No newline at end of file From 5c04f582e6998c21fa836c089168410740941bc8 Mon Sep 17 00:00:00 2001 From: guylei-code Date: Sun, 16 Nov 2025 13:16:19 +0200 Subject: [PATCH 03/19] Update example filename in item.yaml --- modules/src/openai_proxy/item.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/src/openai_proxy/item.yaml b/modules/src/openai_proxy/item.yaml index c7ce3a72..33cf827e 100644 --- a/modules/src/openai_proxy/item.yaml +++ b/modules/src/openai_proxy/item.yaml @@ -2,7 +2,7 @@ apiVersion: v1 categories: - general description: OpenAI application runtime based on fastapi -example: openai_example.ipynb +example: openai.ipynb generationDate: 2025-11-11:12-25 hidden: false labels: @@ -14,4 +14,4 @@ spec: image: mlrun/mlrun kind: general requirements: -version: 1.0.0 \ No newline at end of file +version: 1.0.0 From fedf6000ae662503df59f491b0fa8befe91c55b6 Mon Sep 17 00:00:00 2001 From: guylei-code Date: Sun, 16 Nov 2025 13:17:49 +0200 Subject: [PATCH 04/19] Delete modules/src/openai_proxy/requirements.txt No need due to no unitest --- modules/src/openai_proxy/requirements.txt | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 modules/src/openai_proxy/requirements.txt diff --git a/modules/src/openai_proxy/requirements.txt b/modules/src/openai_proxy/requirements.txt deleted file mode 100644 index 773b3f09..00000000 --- a/modules/src/openai_proxy/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -fastapi>=0.110,<1.0 -uvicorn[standard]>=0.29,<1.0 -gunicorn>=21.2,<22.0 -requests>=2.31,<3.0 \ No newline at end of file From f51c3c647065523c616f0999857705f786d91fc8 Mon Sep 17 00:00:00 2001 From: guylei-code Date: Sun, 16 Nov 2025 13:40:03 +0200 Subject: [PATCH 05/19] Update item.yaml for OpenAI application configuration --- modules/src/openai_proxy/item.yaml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/modules/src/openai_proxy/item.yaml b/modules/src/openai_proxy/item.yaml index 33cf827e..6ad194ea 100644 --- a/modules/src/openai_proxy/item.yaml +++ b/modules/src/openai_proxy/item.yaml @@ -1,6 +1,6 @@ apiVersion: v1 categories: -- general +- GenAI description: OpenAI application runtime based on fastapi example: openai.ipynb generationDate: 2025-11-11:12-25 @@ -10,8 +10,10 @@ labels: mlrunVersion: 1.10.0 name: openai spec: - filename: openai_app.py + filename: openai.py image: mlrun/mlrun kind: general - requirements: + requirements: + - fastapi + - requests version: 1.0.0 From 6d433dff41b193635b9f297ca2ff7251147fc53e Mon Sep 17 00:00:00 2001 From: guylei-code Date: Sun, 16 Nov 2025 13:40:31 +0200 Subject: [PATCH 06/19] Update modules/src/openai_proxy/openai.py Co-authored-by: Daniel Perez <100069700+danielperezz@users.noreply.github.com> --- modules/src/openai_proxy/openai.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/modules/src/openai_proxy/openai.py b/modules/src/openai_proxy/openai.py index 6a6ec1c5..6a81ab94 100644 --- a/modules/src/openai_proxy/openai.py +++ b/modules/src/openai_proxy/openai.py @@ -1,4 +1,17 @@ -# openai_proxy/openai.py +# Copyright 2025 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# import os import json From ef66df5065a15f5a228a55869bb0f841410d0359 Mon Sep 17 00:00:00 2001 From: guylei-code Date: Sun, 16 Nov 2025 13:43:11 +0200 Subject: [PATCH 07/19] Change category name from 'GenAI' to 'genai' --- modules/src/openai_proxy/item.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/src/openai_proxy/item.yaml b/modules/src/openai_proxy/item.yaml index 6ad194ea..dddb0c55 100644 --- a/modules/src/openai_proxy/item.yaml +++ b/modules/src/openai_proxy/item.yaml @@ -1,6 +1,6 @@ apiVersion: v1 categories: -- GenAI +- genai description: OpenAI application runtime based on fastapi example: openai.ipynb generationDate: 2025-11-11:12-25 From fee9a17467a5a404880f3c6d348e7a5adef28dbf Mon Sep 17 00:00:00 2001 From: guylei-code Date: Sun, 16 Nov 2025 13:45:45 +0200 Subject: [PATCH 08/19] Update package requirements with version constraints --- modules/src/openai_proxy/item.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/src/openai_proxy/item.yaml b/modules/src/openai_proxy/item.yaml index dddb0c55..98f188b0 100644 --- a/modules/src/openai_proxy/item.yaml +++ b/modules/src/openai_proxy/item.yaml @@ -14,6 +14,6 @@ spec: image: mlrun/mlrun kind: general requirements: - - fastapi - - requests + - fastapi>=0.110,<1.0 + - requests>=2.31,<3.0 version: 1.0.0 From 61753367e63687f807877c16ee04bc5716767f6a Mon Sep 17 00:00:00 2001 From: Guy_Leibu Date: Sun, 16 Nov 2025 14:15:39 +0200 Subject: [PATCH 09/19] Second commit adding notebook --- modules/src/openai_proxy/openai.ipynb | 86 +++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 modules/src/openai_proxy/openai.ipynb diff --git a/modules/src/openai_proxy/openai.ipynb b/modules/src/openai_proxy/openai.ipynb new file mode 100644 index 00000000..3c2d456a --- /dev/null +++ b/modules/src/openai_proxy/openai.ipynb @@ -0,0 +1,86 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "967b4d5d-7250-40bf-8149-de11e1e3244c", + "metadata": {}, + "outputs": [], + "source": [ + "import mlrun\n", + "import pandas as pd" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17d208f4-a00a-42ef-a849-0fa79bed10cb", + "metadata": {}, + "outputs": [], + "source": [ + "project = mlrun.get_or_create_project(\"fastapi-openai\",user_project=True,context=\"./src\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5f2d4ba7-3f6a-41f6-a030-e30deeda2888", + "metadata": {}, + "outputs": [], + "source": [ + "from openai_app import OpenAIModule" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6432b825-3c2a-4ac9-ada7-74c3cfe5d949", + "metadata": {}, + "outputs": [], + "source": [ + "app = OpenAIModule(project=project)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "67c93a0d-8240-48b8-808e-9cd0af418309", + "metadata": {}, + "outputs": [], + "source": [ + "app = mlrun.import_module(\"hub://openai\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "93e67d6a-5f53-4bda-b0b5-4e2977088139", + "metadata": {}, + "outputs": [], + "source": [ + 
"app.fastapi_app.deploy()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From d9c7dbe413f605c71dcbc78888e0905dd1f5a085 Mon Sep 17 00:00:00 2001 From: guylei-code Date: Sun, 16 Nov 2025 14:40:39 +0200 Subject: [PATCH 10/19] Refactor OpenAI proxy to use base64 encoded script Refactor OpenAI proxy implementation to use base64 encoded script and update FastAPI app configuration. --- modules/src/openai_proxy/openai.py | 214 +++++------------------------ 1 file changed, 37 insertions(+), 177 deletions(-) diff --git a/modules/src/openai_proxy/openai.py b/modules/src/openai_proxy/openai.py index 6a81ab94..f45658ed 100644 --- a/modules/src/openai_proxy/openai.py +++ b/modules/src/openai_proxy/openai.py @@ -13,182 +13,42 @@ # limitations under the License. # -import os -import json -from urllib.parse import urljoin -from typing import Any, Dict, List, Optional -import requests -from fastapi import FastAPI, Request, Response, Body -app = FastAPI( - title="OpenAI Proxy App", - description="Local FastAPI proxy for OpenAI style endpoints", - version="1.0.0", -) - -OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.openai.com").rstrip("/") -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "") -OPENAI_DEFAULT_MODEL = os.getenv("OPENAI_DEFAULT_MODEL", "gpt-4o-mini") - - -def build_headers(incoming: dict) -> dict: - headers = {} - auth = incoming.get("authorization") or incoming.get("Authorization") - if auth: - headers["Authorization"] = auth - elif OPENAI_API_KEY: - headers["Authorization"] = f"Bearer {OPENAI_API_KEY}" - ctype = incoming.get("content-type") or incoming.get("Content-Type") or "application/json" - headers["Content-Type"] = ctype - return headers - - -def build_target(path: str) -> str: - base = OPENAI_BASE_URL - if base.endswith("/v1") or base.endswith("/v1/"): - base = base[:-3] if base.endswith("/v1") else base[:-4] - return urljoin(base + "/", path.lstrip("/")) - - -def forward_json(path: str, body: dict, headers: dict, query: dict): - target = build_target(path) - resp = requests.post( - target, - headers=headers, - params=query, - json=body, - timeout=60, - ) - return resp - -@app.get("/") -def health(): - return {"status": "ok"} - - -# relaxed chat endpoint, accepts any JSON that includes messages -@app.post("/v1/chat/completions") -async def chat_completions( - request: Request, - payload: Dict[str, Any] = Body(...), -): - if "messages" not in payload or not isinstance(payload["messages"], list): - return Response( - content=json.dumps({"error": "messages must be a list of chat messages"}), - status_code=400, - media_type="application/json", - ) - - if "model" not in payload or payload["model"] is None: - payload["model"] = OPENAI_DEFAULT_MODEL - - headers = build_headers(dict(request.headers)) - resp = forward_json("/v1/chat/completions", payload, headers, dict(request.query_params)) - return Response( - content=resp.content, - status_code=resp.status_code, - media_type=resp.headers.get("Content-Type", "application/json"), - ) - - -@app.post("/v1/embeddings") -async def embeddings( - request: Request, - payload: Dict[str, Any] = Body(...), -): - if "model" not in payload or not 
payload["model"]: - payload["model"] = "text-embedding-3-small" - headers = build_headers(dict(request.headers)) - resp = forward_json("/v1/embeddings", payload, headers, dict(request.query_params)) - return Response( - content=resp.content, - status_code=resp.status_code, - media_type=resp.headers.get("Content-Type", "application/json"), - ) - - -@app.post("/v1/responses") -async def responses_api( - request: Request, - payload: Dict[str, Any] = Body(...), -): - if "model" not in payload or payload["model"] is None: - payload["model"] = OPENAI_DEFAULT_MODEL - headers = build_headers(dict(request.headers)) - resp = forward_json("/v1/responses", payload, headers, dict(request.query_params)) - return Response( - content=resp.content, - status_code=resp.status_code, - media_type=resp.headers.get("Content-Type", "application/json"), - ) - - -# ---------------- client ---------------- -class OpenAIProxyClient: - """ - Simple client for the local proxy. - Default base url is http://localhost:8000 - If api_key is not provided, it uses OPENAI_API_KEY from environment. - """ - - def __init__(self, base_url: str = "http://localhost:8000", api_key: Optional[str] = None): - self.base_url = base_url.rstrip("/") - self.api_key = api_key - - def _headers(self) -> Dict[str, str]: - headers = {"Content-Type": "application/json"} - key = self.api_key or os.getenv("OPENAI_API_KEY", "") - if key: - headers["Authorization"] = f"Bearer {key}" - return headers - - def chat(self, messages: List[Dict[str, str]], model: Optional[str] = None) -> Dict[str, Any]: - body: Dict[str, Any] = {"messages": messages} - if model: - body["model"] = model - resp = requests.post( - f"{self.base_url}/v1/chat/completions", - headers=self._headers(), - json=body, - timeout=60, - ) - resp.raise_for_status() - return resp.json() - - def embeddings(self, text: Any, model: Optional[str] = None) -> Dict[str, Any]: - body: Dict[str, Any] = {"input": text} - if model: - body["model"] = model - resp = requests.post( - f"{self.base_url}/v1/embeddings", - headers=self._headers(), - json=body, - timeout=60, - ) - resp.raise_for_status() - return resp.json() - - def responses(self, input_text: Any, model: Optional[str] = None) -> Dict[str, Any]: - body: Dict[str, Any] = {"input": input_text} - if model: - body["model"] = model - resp = requests.post( - f"{self.base_url}/v1/responses", - headers=self._headers(), - json=body, - timeout=60, - ) - resp.raise_for_status() - return resp.json() - - -# optional quick self test when running this file directly -if __name__ == "__main__": - # start the server in another terminal first: - # uvicorn openai_proxy.openai:app --host 0.0.0.0 --port 8000 --reload - c = OpenAIProxyClient() - try: - print("Health:", requests.get(f"{c.base_url}/").json()) - except Exception as e: - print("Server not running:", e) +BASE64 = 
"IyBvcGVuYWlfcHJveHkvb3BlbmFpLnB5CgppbXBvcnQgb3MKaW1wb3J0IGpzb24KZnJvbSB1cmxsaWIucGFyc2UgaW1wb3J0IHVybGpvaW4KZnJvbSB0eXBpbmcgaW1wb3J0IEFueSwgRGljdCwgTGlzdCwgT3B0aW9uYWwKCmltcG9ydCByZXF1ZXN0cwpmcm9tIGZhc3RhcGkgaW1wb3J0IEZhc3RBUEksIFJlcXVlc3QsIFJlc3BvbnNlLCBCb2R5CgphcHAgPSBGYXN0QVBJKAogICAgdGl0bGU9Ik9wZW5BSSBQcm94eSBBcHAiLAogICAgZGVzY3JpcHRpb249IkxvY2FsIEZhc3RBUEkgcHJveHkgZm9yIE9wZW5BSSBzdHlsZSBlbmRwb2ludHMiLAogICAgdmVyc2lvbj0iMS4wLjAiLAopCgpPUEVOQUlfQkFTRV9VUkwgPSBvcy5nZXRlbnYoIk9QRU5BSV9CQVNFX1VSTCIsICJodHRwczovL2FwaS5vcGVuYWkuY29tIikucnN0cmlwKCIvIikKT1BFTkFJX0FQSV9LRVkgPSBvcy5nZXRlbnYoIk9QRU5BSV9BUElfS0VZIiwgIiIpCk9QRU5BSV9ERUZBVUxUX01PREVMID0gb3MuZ2V0ZW52KCJPUEVOQUlfREVGQVVMVF9NT0RFTCIsICJncHQtNG8tbWluaSIpCgoKZGVmIGJ1aWxkX2hlYWRlcnMoaW5jb21pbmc6IGRpY3QpIC0+IGRpY3Q6CiAgICBoZWFkZXJzID0ge30KICAgIGF1dGggPSBpbmNvbWluZy5nZXQoImF1dGhvcml6YXRpb24iKSBvciBpbmNvbWluZy5nZXQoIkF1dGhvcml6YXRpb24iKQogICAgaWYgYXV0aDoKICAgICAgICBoZWFkZXJzWyJBdXRob3JpemF0aW9uIl0gPSBhdXRoCiAgICBlbGlmIE9QRU5BSV9BUElfS0VZOgogICAgICAgIGhlYWRlcnNbIkF1dGhvcml6YXRpb24iXSA9IGYiQmVhcmVyIHtPUEVOQUlfQVBJX0tFWX0iCiAgICBjdHlwZSA9IGluY29taW5nLmdldCgiY29udGVudC10eXBlIikgb3IgaW5jb21pbmcuZ2V0KCJDb250ZW50LVR5cGUiKSBvciAiYXBwbGljYXRpb24vanNvbiIKICAgIGhlYWRlcnNbIkNvbnRlbnQtVHlwZSJdID0gY3R5cGUKICAgIHJldHVybiBoZWFkZXJzCgoKZGVmIGJ1aWxkX3RhcmdldChwYXRoOiBzdHIpIC0+IHN0cjoKICAgIGJhc2UgPSBPUEVOQUlfQkFTRV9VUkwKICAgIGlmIGJhc2UuZW5kc3dpdGgoIi92MSIpIG9yIGJhc2UuZW5kc3dpdGgoIi92MS8iKToKICAgICAgICBiYXNlID0gYmFzZVs6LTNdIGlmIGJhc2UuZW5kc3dpdGgoIi92MSIpIGVsc2UgYmFzZVs6LTRdCiAgICByZXR1cm4gdXJsam9pbihiYXNlICsgIi8iLCBwYXRoLmxzdHJpcCgiLyIpKQoKCmRlZiBmb3J3YXJkX2pzb24ocGF0aDogc3RyLCBib2R5OiBkaWN0LCBoZWFkZXJzOiBkaWN0LCBxdWVyeTogZGljdCk6CiAgICB0YXJnZXQgPSBidWlsZF90YXJnZXQocGF0aCkKICAgIHJlc3AgPSByZXF1ZXN0cy5wb3N0KAogICAgICAgIHRhcmdldCwKICAgICAgICBoZWFkZXJzPWhlYWRlcnMsCiAgICAgICAgcGFyYW1zPXF1ZXJ5LAogICAgICAgIGpzb249Ym9keSwKICAgICAgICB0aW1lb3V0PTYwLAogICAgKQogICAgcmV0dXJuIHJlc3AKCkBhcHAuZ2V0KCIvIikKZGVmIGhlYWx0aCgpOgogICAgcmV0dXJuIHsic3RhdHVzIjogIm9rIn0KCgojIHJlbGF4ZWQgY2hhdCBlbmRwb2ludCwgYWNjZXB0cyBhbnkgSlNPTiB0aGF0IGluY2x1ZGVzIG1lc3NhZ2VzCkBhcHAucG9zdCgiL3YxL2NoYXQvY29tcGxldGlvbnMiKQphc3luYyBkZWYgY2hhdF9jb21wbGV0aW9ucygKICAgIHJlcXVlc3Q6IFJlcXVlc3QsCiAgICBwYXlsb2FkOiBEaWN0W3N0ciwgQW55XSA9IEJvZHkoLi4uKSwKKToKICAgIGlmICJtZXNzYWdlcyIgbm90IGluIHBheWxvYWQgb3Igbm90IGlzaW5zdGFuY2UocGF5bG9hZFsibWVzc2FnZXMiXSwgbGlzdCk6CiAgICAgICAgcmV0dXJuIFJlc3BvbnNlKAogICAgICAgICAgICBjb250ZW50PWpzb24uZHVtcHMoeyJlcnJvciI6ICJtZXNzYWdlcyBtdXN0IGJlIGEgbGlzdCBvZiBjaGF0IG1lc3NhZ2VzIn0pLAogICAgICAgICAgICBzdGF0dXNfY29kZT00MDAsCiAgICAgICAgICAgIG1lZGlhX3R5cGU9ImFwcGxpY2F0aW9uL2pzb24iLAogICAgICAgICkKCiAgICBpZiAibW9kZWwiIG5vdCBpbiBwYXlsb2FkIG9yIHBheWxvYWRbIm1vZGVsIl0gaXMgTm9uZToKICAgICAgICBwYXlsb2FkWyJtb2RlbCJdID0gT1BFTkFJX0RFRkFVTFRfTU9ERUwKCiAgICBoZWFkZXJzID0gYnVpbGRfaGVhZGVycyhkaWN0KHJlcXVlc3QuaGVhZGVycykpCiAgICByZXNwID0gZm9yd2FyZF9qc29uKCIvdjEvY2hhdC9jb21wbGV0aW9ucyIsIHBheWxvYWQsIGhlYWRlcnMsIGRpY3QocmVxdWVzdC5xdWVyeV9wYXJhbXMpKQogICAgcmV0dXJuIFJlc3BvbnNlKAogICAgICAgIGNvbnRlbnQ9cmVzcC5jb250ZW50LAogICAgICAgIHN0YXR1c19jb2RlPXJlc3Auc3RhdHVzX2NvZGUsCiAgICAgICAgbWVkaWFfdHlwZT1yZXNwLmhlYWRlcnMuZ2V0KCJDb250ZW50LVR5cGUiLCAiYXBwbGljYXRpb24vanNvbiIpLAogICAgKQoKCkBhcHAucG9zdCgiL3YxL2VtYmVkZGluZ3MiKQphc3luYyBkZWYgZW1iZWRkaW5ncygKICAgIHJlcXVlc3Q6IFJlcXVlc3QsCiAgICBwYXlsb2FkOiBEaWN0W3N0ciwgQW55XSA9IEJvZHkoLi4uKSwKKToKICAgIGlmICJtb2RlbCIgbm90IGluIHBheWxvYWQgb3Igbm90IHBheWxvYWRbIm1vZGVsIl06CiAgICAgICAgcGF5bG9hZFsibW9kZWwiXSA9ICJ0ZXh0LWVtYmVkZGluZy0zLXNtYWxsIgogICAgaGVhZGVycyA9IGJ1aWxkX2hlYWRlcnMoZGljdChyZXF1ZXN0LmhlYW
RlcnMpKQogICAgcmVzcCA9IGZvcndhcmRfanNvbigiL3YxL2VtYmVkZGluZ3MiLCBwYXlsb2FkLCBoZWFkZXJzLCBkaWN0KHJlcXVlc3QucXVlcnlfcGFyYW1zKSkKICAgIHJldHVybiBSZXNwb25zZSgKICAgICAgICBjb250ZW50PXJlc3AuY29udGVudCwKICAgICAgICBzdGF0dXNfY29kZT1yZXNwLnN0YXR1c19jb2RlLAogICAgICAgIG1lZGlhX3R5cGU9cmVzcC5oZWFkZXJzLmdldCgiQ29udGVudC1UeXBlIiwgImFwcGxpY2F0aW9uL2pzb24iKSwKICAgICkKCgpAYXBwLnBvc3QoIi92MS9yZXNwb25zZXMiKQphc3luYyBkZWYgcmVzcG9uc2VzX2FwaSgKICAgIHJlcXVlc3Q6IFJlcXVlc3QsCiAgICBwYXlsb2FkOiBEaWN0W3N0ciwgQW55XSA9IEJvZHkoLi4uKSwKKToKICAgIGlmICJtb2RlbCIgbm90IGluIHBheWxvYWQgb3IgcGF5bG9hZFsibW9kZWwiXSBpcyBOb25lOgogICAgICAgIHBheWxvYWRbIm1vZGVsIl0gPSBPUEVOQUlfREVGQVVMVF9NT0RFTAogICAgaGVhZGVycyA9IGJ1aWxkX2hlYWRlcnMoZGljdChyZXF1ZXN0LmhlYWRlcnMpKQogICAgcmVzcCA9IGZvcndhcmRfanNvbigiL3YxL3Jlc3BvbnNlcyIsIHBheWxvYWQsIGhlYWRlcnMsIGRpY3QocmVxdWVzdC5xdWVyeV9wYXJhbXMpKQogICAgcmV0dXJuIFJlc3BvbnNlKAogICAgICAgIGNvbnRlbnQ9cmVzcC5jb250ZW50LAogICAgICAgIHN0YXR1c19jb2RlPXJlc3Auc3RhdHVzX2NvZGUsCiAgICAgICAgbWVkaWFfdHlwZT1yZXNwLmhlYWRlcnMuZ2V0KCJDb250ZW50LVR5cGUiLCAiYXBwbGljYXRpb24vanNvbiIpLAogICAgKQoKCiMgLS0tLS0tLS0tLS0tLS0tLSBjbGllbnQgLS0tLS0tLS0tLS0tLS0tLQpjbGFzcyBPcGVuQUlQcm94eUNsaWVudDoKICAgICIiIgogICAgU2ltcGxlIGNsaWVudCBmb3IgdGhlIGxvY2FsIHByb3h5LgogICAgRGVmYXVsdCBiYXNlIHVybCBpcyBodHRwOi8vbG9jYWxob3N0OjgwMDAKICAgIElmIGFwaV9rZXkgaXMgbm90IHByb3ZpZGVkLCBpdCB1c2VzIE9QRU5BSV9BUElfS0VZIGZyb20gZW52aXJvbm1lbnQuCiAgICAiIiIKCiAgICBkZWYgX19pbml0X18oc2VsZiwgYmFzZV91cmw6IHN0ciA9ICJodHRwOi8vbG9jYWxob3N0OjgwMDAiLCBhcGlfa2V5OiBPcHRpb25hbFtzdHJdID0gTm9uZSk6CiAgICAgICAgc2VsZi5iYXNlX3VybCA9IGJhc2VfdXJsLnJzdHJpcCgiLyIpCiAgICAgICAgc2VsZi5hcGlfa2V5ID0gYXBpX2tleQoKICAgIGRlZiBfaGVhZGVycyhzZWxmKSAtPiBEaWN0W3N0ciwgc3RyXToKICAgICAgICBoZWFkZXJzID0geyJDb250ZW50LVR5cGUiOiAiYXBwbGljYXRpb24vanNvbiJ9CiAgICAgICAga2V5ID0gc2VsZi5hcGlfa2V5IG9yIG9zLmdldGVudigiT1BFTkFJX0FQSV9LRVkiLCAiIikKICAgICAgICBpZiBrZXk6CiAgICAgICAgICAgIGhlYWRlcnNbIkF1dGhvcml6YXRpb24iXSA9IGYiQmVhcmVyIHtrZXl9IgogICAgICAgIHJldHVybiBoZWFkZXJzCgogICAgZGVmIGNoYXQoc2VsZiwgbWVzc2FnZXM6IExpc3RbRGljdFtzdHIsIHN0cl1dLCBtb2RlbDogT3B0aW9uYWxbc3RyXSA9IE5vbmUpIC0+IERpY3Rbc3RyLCBBbnldOgogICAgICAgIGJvZHk6IERpY3Rbc3RyLCBBbnldID0geyJtZXNzYWdlcyI6IG1lc3NhZ2VzfQogICAgICAgIGlmIG1vZGVsOgogICAgICAgICAgICBib2R5WyJtb2RlbCJdID0gbW9kZWwKICAgICAgICByZXNwID0gcmVxdWVzdHMucG9zdCgKICAgICAgICAgICAgZiJ7c2VsZi5iYXNlX3VybH0vdjEvY2hhdC9jb21wbGV0aW9ucyIsCiAgICAgICAgICAgIGhlYWRlcnM9c2VsZi5faGVhZGVycygpLAogICAgICAgICAgICBqc29uPWJvZHksCiAgICAgICAgICAgIHRpbWVvdXQ9NjAsCiAgICAgICAgKQogICAgICAgIHJlc3AucmFpc2VfZm9yX3N0YXR1cygpCiAgICAgICAgcmV0dXJuIHJlc3AuanNvbigpCgogICAgZGVmIGVtYmVkZGluZ3Moc2VsZiwgdGV4dDogQW55LCBtb2RlbDogT3B0aW9uYWxbc3RyXSA9IE5vbmUpIC0+IERpY3Rbc3RyLCBBbnldOgogICAgICAgIGJvZHk6IERpY3Rbc3RyLCBBbnldID0geyJpbnB1dCI6IHRleHR9CiAgICAgICAgaWYgbW9kZWw6CiAgICAgICAgICAgIGJvZHlbIm1vZGVsIl0gPSBtb2RlbAogICAgICAgIHJlc3AgPSByZXF1ZXN0cy5wb3N0KAogICAgICAgICAgICBmIntzZWxmLmJhc2VfdXJsfS92MS9lbWJlZGRpbmdzIiwKICAgICAgICAgICAgaGVhZGVycz1zZWxmLl9oZWFkZXJzKCksCiAgICAgICAgICAgIGpzb249Ym9keSwKICAgICAgICAgICAgdGltZW91dD02MCwKICAgICAgICApCiAgICAgICAgcmVzcC5yYWlzZV9mb3Jfc3RhdHVzKCkKICAgICAgICByZXR1cm4gcmVzcC5qc29uKCkKCiAgICBkZWYgcmVzcG9uc2VzKHNlbGYsIGlucHV0X3RleHQ6IEFueSwgbW9kZWw6IE9wdGlvbmFsW3N0cl0gPSBOb25lKSAtPiBEaWN0W3N0ciwgQW55XToKICAgICAgICBib2R5OiBEaWN0W3N0ciwgQW55XSA9IHsiaW5wdXQiOiBpbnB1dF90ZXh0fQogICAgICAgIGlmIG1vZGVsOgogICAgICAgICAgICBib2R5WyJtb2RlbCJdID0gbW9kZWwKICAgICAgICByZXNwID0gcmVxdWVzdHMucG9zdCgKICAgICAgICAgICAgZiJ7c2VsZi5iYXNlX3VybH0vdjEvcmVzcG9uc2VzIiwKICAgICAgICAgICAgaGVhZGVycz1zZWxmLl9oZWFkZXJzKCksCiAgICAgICAgICAgIGpzb249Ym9keSwKICAgICAgICAgICAgdGltZ
W91dD02MCwKICAgICAgICApCiAgICAgICAgcmVzcC5yYWlzZV9mb3Jfc3RhdHVzKCkKICAgICAgICByZXR1cm4gcmVzcC5qc29uKCkKCgojIG9wdGlvbmFsIHF1aWNrIHNlbGYgdGVzdCB3aGVuIHJ1bm5pbmcgdGhpcyBmaWxlIGRpcmVjdGx5CmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICAjIHN0YXJ0IHRoZSBzZXJ2ZXIgaW4gYW5vdGhlciB0ZXJtaW5hbCBmaXJzdDoKICAgICMgdXZpY29ybiBvcGVuYWlfcHJveHkub3BlbmFpOmFwcCAtLWhvc3QgMC4wLjAuMCAtLXBvcnQgODAwMCAtLXJlbG9hZAogICAgYyA9IE9wZW5BSVByb3h5Q2xpZW50KCkKICAgIHRyeToKICAgICAgICBwcmludCgiSGVhbHRoOiIsIHJlcXVlc3RzLmdldChmIntjLmJhc2VfdXJsfS8iKS5qc29uKCkpCiAgICBleGNlcHQgRXhjZXB0aW9uIGFzIGU6CiAgICAgICAgcHJpbnQoIlNlcnZlciBub3QgcnVubmluZzoiLCBlKQo=" +CMD = r''' +set -e +python - <<'PY' +import os, base64, pathlib +code = os.environ["BASE64"] +pathlib.Path("/opt/app").mkdir(parents=True, exist_ok=True) +with open("/opt/app/openai.py","wb") as f: + f.write(base64.b64decode(code)) +print("Wrote /opt/app/openai.py") +PY + +exec gunicorn openai:app \ + --chdir /opt/app \ + --bind 0.0.0.0:8000 \ + --worker-class uvicorn.workers.UvicornWorker \ + --log-level info +'''.strip() +class OpenAIModule: + def __init__(self,project): + self.project = project + self.fastapi_app = self.project.set_function(name="openai",kind="application",image="python:3.11") + self.fastapi_app.with_requirements([ + "fastapi>=0.110,<1.0", + "uvicorn[standard]>=0.29,<1.0", + "gunicorn>=21.2,<22.0", + "requests>=2.31,<3.0", + ]) + self.fastapi_app.set_env("BASE64",BASE64) + self.fastapi_app.set_internal_application_port(8000) + self.fastapi_app.spec.command = "/bin/sh" + self.fastapi_app.spec.args = ["-c", CMD] + + + + + From 011e48a978b1932e7c368e77c4e3dff0dc387136 Mon Sep 17 00:00:00 2001 From: guylei-code Date: Sun, 16 Nov 2025 14:46:00 +0200 Subject: [PATCH 11/19] Change deployment method to OpenAIModule --- modules/src/openai_proxy/openai.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/src/openai_proxy/openai.ipynb b/modules/src/openai_proxy/openai.ipynb index 3c2d456a..bf68b749 100644 --- a/modules/src/openai_proxy/openai.ipynb +++ b/modules/src/openai_proxy/openai.ipynb @@ -58,7 +58,7 @@ "metadata": {}, "outputs": [], "source": [ - "app.fastapi_app.deploy()" + "app.OpenAIModule.deploy()" ] } ], From 434d1ff27c743c119cb515c00aa5afddb297829b Mon Sep 17 00:00:00 2001 From: Guy_Leibu Date: Sun, 16 Nov 2025 14:53:36 +0200 Subject: [PATCH 12/19] Third commit adding notebook --- modules/src/openai_proxy/openai.ipynb | 30 ++++++++------------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/modules/src/openai_proxy/openai.ipynb b/modules/src/openai_proxy/openai.ipynb index 3c2d456a..58445f1e 100644 --- a/modules/src/openai_proxy/openai.ipynb +++ b/modules/src/openai_proxy/openai.ipynb @@ -1,44 +1,32 @@ { "cells": [ { - "cell_type": "code", - "execution_count": null, - "id": "967b4d5d-7250-40bf-8149-de11e1e3244c", + "cell_type": "markdown", + "id": "220629c8-17aa-45f6-ac81-0ca31e165412", "metadata": {}, - "outputs": [], "source": [ - "import mlrun\n", - "import pandas as pd" + "# OpenAI Module Demo" ] }, { "cell_type": "code", "execution_count": null, - "id": "17d208f4-a00a-42ef-a849-0fa79bed10cb", - "metadata": {}, - "outputs": [], - "source": [ - "project = mlrun.get_or_create_project(\"fastapi-openai\",user_project=True,context=\"./src\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5f2d4ba7-3f6a-41f6-a030-e30deeda2888", + "id": "967b4d5d-7250-40bf-8149-de11e1e3244c", "metadata": {}, "outputs": [], "source": [ - "from openai_app import OpenAIModule" + "import mlrun\n", + "import pandas as 
pd" ] }, { "cell_type": "code", "execution_count": null, - "id": "6432b825-3c2a-4ac9-ada7-74c3cfe5d949", + "id": "17d208f4-a00a-42ef-a849-0fa79bed10cb", "metadata": {}, "outputs": [], "source": [ - "app = OpenAIModule(project=project)" + "project = mlrun.get_or_create_project(\"fastapi-openai\",user_project=True,context=\"./src\")" ] }, { @@ -58,7 +46,7 @@ "metadata": {}, "outputs": [], "source": [ - "app.fastapi_app.deploy()" + "res = app.OpenAIModule.deploy()" ] } ], From c19e04795c2754b1fe48f7be0178dd2d1ae4ecb0 Mon Sep 17 00:00:00 2001 From: Guy_Leibu Date: Sun, 16 Nov 2025 14:55:09 +0200 Subject: [PATCH 13/19] Third commit adding notebook --- modules/src/openai_proxy/openai.ipynb | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/modules/src/openai_proxy/openai.ipynb b/modules/src/openai_proxy/openai.ipynb index 58445f1e..123934fb 100644 --- a/modules/src/openai_proxy/openai.ipynb +++ b/modules/src/openai_proxy/openai.ipynb @@ -45,9 +45,7 @@ "id": "93e67d6a-5f53-4bda-b0b5-4e2977088139", "metadata": {}, "outputs": [], - "source": [ - "res = app.OpenAIModule.deploy()" - ] + "source": "app.OpenAIModule.deploy()" } ], "metadata": { From 39303424571bb9c941d49f733396bda92d2a766a Mon Sep 17 00:00:00 2001 From: guylei-code Date: Sun, 16 Nov 2025 15:13:20 +0200 Subject: [PATCH 14/19] Remove package requirements from item.yaml Removed specific requirements for fastapi and requests. --- modules/src/openai_proxy/item.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/modules/src/openai_proxy/item.yaml b/modules/src/openai_proxy/item.yaml index 98f188b0..895d19da 100644 --- a/modules/src/openai_proxy/item.yaml +++ b/modules/src/openai_proxy/item.yaml @@ -13,7 +13,4 @@ spec: filename: openai.py image: mlrun/mlrun kind: general - requirements: - - fastapi>=0.110,<1.0 - - requests>=2.31,<3.0 version: 1.0.0 From bc46fa27353fc27d24136fdb17969c863e806490 Mon Sep 17 00:00:00 2001 From: guylei-code Date: Mon, 17 Nov 2025 13:43:38 +0200 Subject: [PATCH 15/19] Rename item and update kind in YAML --- modules/src/openai_proxy/item.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/src/openai_proxy/item.yaml b/modules/src/openai_proxy/item.yaml index 895d19da..50dc2209 100644 --- a/modules/src/openai_proxy/item.yaml +++ b/modules/src/openai_proxy/item.yaml @@ -8,9 +8,9 @@ hidden: false labels: author: Iguazio mlrunVersion: 1.10.0 -name: openai +name: openai_proxy_app spec: filename: openai.py image: mlrun/mlrun - kind: general + kind: generic version: 1.0.0 From 1a7bf252002209700f44478ee6b632a23b2afd25 Mon Sep 17 00:00:00 2001 From: guylei-code Date: Mon, 17 Nov 2025 14:16:06 +0200 Subject: [PATCH 16/19] Update openai.py --- modules/src/openai_proxy/openai.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/src/openai_proxy/openai.py b/modules/src/openai_proxy/openai.py index f45658ed..6afad490 100644 --- a/modules/src/openai_proxy/openai.py +++ b/modules/src/openai_proxy/openai.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # - +#This module acts as a lightweight gateway to OpenAI-compatible APIs. +#You can send chat prompts, create embeddings, or get model responses without worrying about authentication or endpoint differences. +#It simplifies access so you can test, analyze, or integrate AI features directly into your projects or notebooks with minimal setup. 
BASE64 = "IyBvcGVuYWlfcHJveHkvb3BlbmFpLnB5CgppbXBvcnQgb3MKaW1wb3J0IGpzb24KZnJvbSB1cmxsaWIucGFyc2UgaW1wb3J0IHVybGpvaW4KZnJvbSB0eXBpbmcgaW1wb3J0IEFueSwgRGljdCwgTGlzdCwgT3B0aW9uYWwKCmltcG9ydCByZXF1ZXN0cwpmcm9tIGZhc3RhcGkgaW1wb3J0IEZhc3RBUEksIFJlcXVlc3QsIFJlc3BvbnNlLCBCb2R5CgphcHAgPSBGYXN0QVBJKAogICAgdGl0bGU9Ik9wZW5BSSBQcm94eSBBcHAiLAogICAgZGVzY3JpcHRpb249IkxvY2FsIEZhc3RBUEkgcHJveHkgZm9yIE9wZW5BSSBzdHlsZSBlbmRwb2ludHMiLAogICAgdmVyc2lvbj0iMS4wLjAiLAopCgpPUEVOQUlfQkFTRV9VUkwgPSBvcy5nZXRlbnYoIk9QRU5BSV9CQVNFX1VSTCIsICJodHRwczovL2FwaS5vcGVuYWkuY29tIikucnN0cmlwKCIvIikKT1BFTkFJX0FQSV9LRVkgPSBvcy5nZXRlbnYoIk9QRU5BSV9BUElfS0VZIiwgIiIpCk9QRU5BSV9ERUZBVUxUX01PREVMID0gb3MuZ2V0ZW52KCJPUEVOQUlfREVGQVVMVF9NT0RFTCIsICJncHQtNG8tbWluaSIpCgoKZGVmIGJ1aWxkX2hlYWRlcnMoaW5jb21pbmc6IGRpY3QpIC0+IGRpY3Q6CiAgICBoZWFkZXJzID0ge30KICAgIGF1dGggPSBpbmNvbWluZy5nZXQoImF1dGhvcml6YXRpb24iKSBvciBpbmNvbWluZy5nZXQoIkF1dGhvcml6YXRpb24iKQogICAgaWYgYXV0aDoKICAgICAgICBoZWFkZXJzWyJBdXRob3JpemF0aW9uIl0gPSBhdXRoCiAgICBlbGlmIE9QRU5BSV9BUElfS0VZOgogICAgICAgIGhlYWRlcnNbIkF1dGhvcml6YXRpb24iXSA9IGYiQmVhcmVyIHtPUEVOQUlfQVBJX0tFWX0iCiAgICBjdHlwZSA9IGluY29taW5nLmdldCgiY29udGVudC10eXBlIikgb3IgaW5jb21pbmcuZ2V0KCJDb250ZW50LVR5cGUiKSBvciAiYXBwbGljYXRpb24vanNvbiIKICAgIGhlYWRlcnNbIkNvbnRlbnQtVHlwZSJdID0gY3R5cGUKICAgIHJldHVybiBoZWFkZXJzCgoKZGVmIGJ1aWxkX3RhcmdldChwYXRoOiBzdHIpIC0+IHN0cjoKICAgIGJhc2UgPSBPUEVOQUlfQkFTRV9VUkwKICAgIGlmIGJhc2UuZW5kc3dpdGgoIi92MSIpIG9yIGJhc2UuZW5kc3dpdGgoIi92MS8iKToKICAgICAgICBiYXNlID0gYmFzZVs6LTNdIGlmIGJhc2UuZW5kc3dpdGgoIi92MSIpIGVsc2UgYmFzZVs6LTRdCiAgICByZXR1cm4gdXJsam9pbihiYXNlICsgIi8iLCBwYXRoLmxzdHJpcCgiLyIpKQoKCmRlZiBmb3J3YXJkX2pzb24ocGF0aDogc3RyLCBib2R5OiBkaWN0LCBoZWFkZXJzOiBkaWN0LCBxdWVyeTogZGljdCk6CiAgICB0YXJnZXQgPSBidWlsZF90YXJnZXQocGF0aCkKICAgIHJlc3AgPSByZXF1ZXN0cy5wb3N0KAogICAgICAgIHRhcmdldCwKICAgICAgICBoZWFkZXJzPWhlYWRlcnMsCiAgICAgICAgcGFyYW1zPXF1ZXJ5LAogICAgICAgIGpzb249Ym9keSwKICAgICAgICB0aW1lb3V0PTYwLAogICAgKQogICAgcmV0dXJuIHJlc3AKCkBhcHAuZ2V0KCIvIikKZGVmIGhlYWx0aCgpOgogICAgcmV0dXJuIHsic3RhdHVzIjogIm9rIn0KCgojIHJlbGF4ZWQgY2hhdCBlbmRwb2ludCwgYWNjZXB0cyBhbnkgSlNPTiB0aGF0IGluY2x1ZGVzIG1lc3NhZ2VzCkBhcHAucG9zdCgiL3YxL2NoYXQvY29tcGxldGlvbnMiKQphc3luYyBkZWYgY2hhdF9jb21wbGV0aW9ucygKICAgIHJlcXVlc3Q6IFJlcXVlc3QsCiAgICBwYXlsb2FkOiBEaWN0W3N0ciwgQW55XSA9IEJvZHkoLi4uKSwKKToKICAgIGlmICJtZXNzYWdlcyIgbm90IGluIHBheWxvYWQgb3Igbm90IGlzaW5zdGFuY2UocGF5bG9hZFsibWVzc2FnZXMiXSwgbGlzdCk6CiAgICAgICAgcmV0dXJuIFJlc3BvbnNlKAogICAgICAgICAgICBjb250ZW50PWpzb24uZHVtcHMoeyJlcnJvciI6ICJtZXNzYWdlcyBtdXN0IGJlIGEgbGlzdCBvZiBjaGF0IG1lc3NhZ2VzIn0pLAogICAgICAgICAgICBzdGF0dXNfY29kZT00MDAsCiAgICAgICAgICAgIG1lZGlhX3R5cGU9ImFwcGxpY2F0aW9uL2pzb24iLAogICAgICAgICkKCiAgICBpZiAibW9kZWwiIG5vdCBpbiBwYXlsb2FkIG9yIHBheWxvYWRbIm1vZGVsIl0gaXMgTm9uZToKICAgICAgICBwYXlsb2FkWyJtb2RlbCJdID0gT1BFTkFJX0RFRkFVTFRfTU9ERUwKCiAgICBoZWFkZXJzID0gYnVpbGRfaGVhZGVycyhkaWN0KHJlcXVlc3QuaGVhZGVycykpCiAgICByZXNwID0gZm9yd2FyZF9qc29uKCIvdjEvY2hhdC9jb21wbGV0aW9ucyIsIHBheWxvYWQsIGhlYWRlcnMsIGRpY3QocmVxdWVzdC5xdWVyeV9wYXJhbXMpKQogICAgcmV0dXJuIFJlc3BvbnNlKAogICAgICAgIGNvbnRlbnQ9cmVzcC5jb250ZW50LAogICAgICAgIHN0YXR1c19jb2RlPXJlc3Auc3RhdHVzX2NvZGUsCiAgICAgICAgbWVkaWFfdHlwZT1yZXNwLmhlYWRlcnMuZ2V0KCJDb250ZW50LVR5cGUiLCAiYXBwbGljYXRpb24vanNvbiIpLAogICAgKQoKCkBhcHAucG9zdCgiL3YxL2VtYmVkZGluZ3MiKQphc3luYyBkZWYgZW1iZWRkaW5ncygKICAgIHJlcXVlc3Q6IFJlcXVlc3QsCiAgICBwYXlsb2FkOiBEaWN0W3N0ciwgQW55XSA9IEJvZHkoLi4uKSwKKToKICAgIGlmICJtb2RlbCIgbm90IGluIHBheWxvYWQgb3Igbm90IHBheWxvYWRbIm1vZGVsIl06CiAgICAgICAgcGF5bG9hZFsibW9kZWwiXSA9ICJ0ZXh0LWVtYmVkZGluZy0zLXNtYWxsIgogICAgaGVhZGVycyA9IGJ1aWxkX2hlYWRlcnMoZGljdChyZXF1Z
XN0LmhlYWRlcnMpKQogICAgcmVzcCA9IGZvcndhcmRfanNvbigiL3YxL2VtYmVkZGluZ3MiLCBwYXlsb2FkLCBoZWFkZXJzLCBkaWN0KHJlcXVlc3QucXVlcnlfcGFyYW1zKSkKICAgIHJldHVybiBSZXNwb25zZSgKICAgICAgICBjb250ZW50PXJlc3AuY29udGVudCwKICAgICAgICBzdGF0dXNfY29kZT1yZXNwLnN0YXR1c19jb2RlLAogICAgICAgIG1lZGlhX3R5cGU9cmVzcC5oZWFkZXJzLmdldCgiQ29udGVudC1UeXBlIiwgImFwcGxpY2F0aW9uL2pzb24iKSwKICAgICkKCgpAYXBwLnBvc3QoIi92MS9yZXNwb25zZXMiKQphc3luYyBkZWYgcmVzcG9uc2VzX2FwaSgKICAgIHJlcXVlc3Q6IFJlcXVlc3QsCiAgICBwYXlsb2FkOiBEaWN0W3N0ciwgQW55XSA9IEJvZHkoLi4uKSwKKToKICAgIGlmICJtb2RlbCIgbm90IGluIHBheWxvYWQgb3IgcGF5bG9hZFsibW9kZWwiXSBpcyBOb25lOgogICAgICAgIHBheWxvYWRbIm1vZGVsIl0gPSBPUEVOQUlfREVGQVVMVF9NT0RFTAogICAgaGVhZGVycyA9IGJ1aWxkX2hlYWRlcnMoZGljdChyZXF1ZXN0LmhlYWRlcnMpKQogICAgcmVzcCA9IGZvcndhcmRfanNvbigiL3YxL3Jlc3BvbnNlcyIsIHBheWxvYWQsIGhlYWRlcnMsIGRpY3QocmVxdWVzdC5xdWVyeV9wYXJhbXMpKQogICAgcmV0dXJuIFJlc3BvbnNlKAogICAgICAgIGNvbnRlbnQ9cmVzcC5jb250ZW50LAogICAgICAgIHN0YXR1c19jb2RlPXJlc3Auc3RhdHVzX2NvZGUsCiAgICAgICAgbWVkaWFfdHlwZT1yZXNwLmhlYWRlcnMuZ2V0KCJDb250ZW50LVR5cGUiLCAiYXBwbGljYXRpb24vanNvbiIpLAogICAgKQoKCiMgLS0tLS0tLS0tLS0tLS0tLSBjbGllbnQgLS0tLS0tLS0tLS0tLS0tLQpjbGFzcyBPcGVuQUlQcm94eUNsaWVudDoKICAgICIiIgogICAgU2ltcGxlIGNsaWVudCBmb3IgdGhlIGxvY2FsIHByb3h5LgogICAgRGVmYXVsdCBiYXNlIHVybCBpcyBodHRwOi8vbG9jYWxob3N0OjgwMDAKICAgIElmIGFwaV9rZXkgaXMgbm90IHByb3ZpZGVkLCBpdCB1c2VzIE9QRU5BSV9BUElfS0VZIGZyb20gZW52aXJvbm1lbnQuCiAgICAiIiIKCiAgICBkZWYgX19pbml0X18oc2VsZiwgYmFzZV91cmw6IHN0ciA9ICJodHRwOi8vbG9jYWxob3N0OjgwMDAiLCBhcGlfa2V5OiBPcHRpb25hbFtzdHJdID0gTm9uZSk6CiAgICAgICAgc2VsZi5iYXNlX3VybCA9IGJhc2VfdXJsLnJzdHJpcCgiLyIpCiAgICAgICAgc2VsZi5hcGlfa2V5ID0gYXBpX2tleQoKICAgIGRlZiBfaGVhZGVycyhzZWxmKSAtPiBEaWN0W3N0ciwgc3RyXToKICAgICAgICBoZWFkZXJzID0geyJDb250ZW50LVR5cGUiOiAiYXBwbGljYXRpb24vanNvbiJ9CiAgICAgICAga2V5ID0gc2VsZi5hcGlfa2V5IG9yIG9zLmdldGVudigiT1BFTkFJX0FQSV9LRVkiLCAiIikKICAgICAgICBpZiBrZXk6CiAgICAgICAgICAgIGhlYWRlcnNbIkF1dGhvcml6YXRpb24iXSA9IGYiQmVhcmVyIHtrZXl9IgogICAgICAgIHJldHVybiBoZWFkZXJzCgogICAgZGVmIGNoYXQoc2VsZiwgbWVzc2FnZXM6IExpc3RbRGljdFtzdHIsIHN0cl1dLCBtb2RlbDogT3B0aW9uYWxbc3RyXSA9IE5vbmUpIC0+IERpY3Rbc3RyLCBBbnldOgogICAgICAgIGJvZHk6IERpY3Rbc3RyLCBBbnldID0geyJtZXNzYWdlcyI6IG1lc3NhZ2VzfQogICAgICAgIGlmIG1vZGVsOgogICAgICAgICAgICBib2R5WyJtb2RlbCJdID0gbW9kZWwKICAgICAgICByZXNwID0gcmVxdWVzdHMucG9zdCgKICAgICAgICAgICAgZiJ7c2VsZi5iYXNlX3VybH0vdjEvY2hhdC9jb21wbGV0aW9ucyIsCiAgICAgICAgICAgIGhlYWRlcnM9c2VsZi5faGVhZGVycygpLAogICAgICAgICAgICBqc29uPWJvZHksCiAgICAgICAgICAgIHRpbWVvdXQ9NjAsCiAgICAgICAgKQogICAgICAgIHJlc3AucmFpc2VfZm9yX3N0YXR1cygpCiAgICAgICAgcmV0dXJuIHJlc3AuanNvbigpCgogICAgZGVmIGVtYmVkZGluZ3Moc2VsZiwgdGV4dDogQW55LCBtb2RlbDogT3B0aW9uYWxbc3RyXSA9IE5vbmUpIC0+IERpY3Rbc3RyLCBBbnldOgogICAgICAgIGJvZHk6IERpY3Rbc3RyLCBBbnldID0geyJpbnB1dCI6IHRleHR9CiAgICAgICAgaWYgbW9kZWw6CiAgICAgICAgICAgIGJvZHlbIm1vZGVsIl0gPSBtb2RlbAogICAgICAgIHJlc3AgPSByZXF1ZXN0cy5wb3N0KAogICAgICAgICAgICBmIntzZWxmLmJhc2VfdXJsfS92MS9lbWJlZGRpbmdzIiwKICAgICAgICAgICAgaGVhZGVycz1zZWxmLl9oZWFkZXJzKCksCiAgICAgICAgICAgIGpzb249Ym9keSwKICAgICAgICAgICAgdGltZW91dD02MCwKICAgICAgICApCiAgICAgICAgcmVzcC5yYWlzZV9mb3Jfc3RhdHVzKCkKICAgICAgICByZXR1cm4gcmVzcC5qc29uKCkKCiAgICBkZWYgcmVzcG9uc2VzKHNlbGYsIGlucHV0X3RleHQ6IEFueSwgbW9kZWw6IE9wdGlvbmFsW3N0cl0gPSBOb25lKSAtPiBEaWN0W3N0ciwgQW55XToKICAgICAgICBib2R5OiBEaWN0W3N0ciwgQW55XSA9IHsiaW5wdXQiOiBpbnB1dF90ZXh0fQogICAgICAgIGlmIG1vZGVsOgogICAgICAgICAgICBib2R5WyJtb2RlbCJdID0gbW9kZWwKICAgICAgICByZXNwID0gcmVxdWVzdHMucG9zdCgKICAgICAgICAgICAgZiJ7c2VsZi5iYXNlX3VybH0vdjEvcmVzcG9uc2VzIiwKICAgICAgICAgICAgaGVhZGVycz1zZWxmLl9oZWFkZXJzKCksCiAgICAgICAgICAgIGpzb249Ym9keSwKICAgICAgICAg
ICAgdGltZW91dD02MCwKICAgICAgICApCiAgICAgICAgcmVzcC5yYWlzZV9mb3Jfc3RhdHVzKCkKICAgICAgICByZXR1cm4gcmVzcC5qc29uKCkKCgojIG9wdGlvbmFsIHF1aWNrIHNlbGYgdGVzdCB3aGVuIHJ1bm5pbmcgdGhpcyBmaWxlIGRpcmVjdGx5CmlmIF9fbmFtZV9fID09ICJfX21haW5fXyI6CiAgICAjIHN0YXJ0IHRoZSBzZXJ2ZXIgaW4gYW5vdGhlciB0ZXJtaW5hbCBmaXJzdDoKICAgICMgdXZpY29ybiBvcGVuYWlfcHJveHkub3BlbmFpOmFwcCAtLWhvc3QgMC4wLjAuMCAtLXBvcnQgODAwMCAtLXJlbG9hZAogICAgYyA9IE9wZW5BSVByb3h5Q2xpZW50KCkKICAgIHRyeToKICAgICAgICBwcmludCgiSGVhbHRoOiIsIHJlcXVlc3RzLmdldChmIntjLmJhc2VfdXJsfS8iKS5qc29uKCkpCiAgICBleGNlcHQgRXhjZXB0aW9uIGFzIGU6CiAgICAgICAgcHJpbnQoIlNlcnZlciBub3QgcnVubmluZzoiLCBlKQo=" From c1771757da7b96f47acec73787cde32f911d6857 Mon Sep 17 00:00:00 2001 From: Guy_Leibu Date: Mon, 17 Nov 2025 14:47:58 +0200 Subject: [PATCH 17/19] Third commit adding notebook --- .../src/openai_proxy/{openai.ipynb => openai_proxy_app.ipynb} | 0 modules/src/openai_proxy/{openai.py => openai_proxy_app.py} | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename modules/src/openai_proxy/{openai.ipynb => openai_proxy_app.ipynb} (100%) rename modules/src/openai_proxy/{openai.py => openai_proxy_app.py} (98%) diff --git a/modules/src/openai_proxy/openai.ipynb b/modules/src/openai_proxy/openai_proxy_app.ipynb similarity index 100% rename from modules/src/openai_proxy/openai.ipynb rename to modules/src/openai_proxy/openai_proxy_app.ipynb diff --git a/modules/src/openai_proxy/openai.py b/modules/src/openai_proxy/openai_proxy_app.py similarity index 98% rename from modules/src/openai_proxy/openai.py rename to modules/src/openai_proxy/openai_proxy_app.py index f45658ed..44092dd9 100644 --- a/modules/src/openai_proxy/openai.py +++ b/modules/src/openai_proxy/openai_proxy_app.py @@ -22,9 +22,9 @@ import os, base64, pathlib code = os.environ["BASE64"] pathlib.Path("/opt/app").mkdir(parents=True, exist_ok=True) -with open("/opt/app/openai.py","wb") as f: +with open("/opt/app/openai_proxy_app.py","wb") as f: f.write(base64.b64decode(code)) -print("Wrote /opt/app/openai.py") +print("Wrote /opt/app/openai_proxy_app.py") PY exec gunicorn openai:app \ From b0c75a8b60890d7fd69fd020fd4ed8b9e626ec5a Mon Sep 17 00:00:00 2001 From: Guy_Leibu Date: Mon, 17 Nov 2025 14:52:17 +0200 Subject: [PATCH 18/19] Fix after review --- modules/src/openai_proxy/item.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/src/openai_proxy/item.yaml b/modules/src/openai_proxy/item.yaml index 50dc2209..bf295cf2 100644 --- a/modules/src/openai_proxy/item.yaml +++ b/modules/src/openai_proxy/item.yaml @@ -2,7 +2,7 @@ apiVersion: v1 categories: - genai description: OpenAI application runtime based on fastapi -example: openai.ipynb +example: openai_proxy_app.ipynb generationDate: 2025-11-11:12-25 hidden: false labels: @@ -10,7 +10,10 @@ labels: mlrunVersion: 1.10.0 name: openai_proxy_app spec: - filename: openai.py + filename: openai_proxy_app.py image: mlrun/mlrun + requirements: + - fastapi>=0.110,<1.0 + - requests>=2.31,<3.0 kind: generic version: 1.0.0 From 412a83bd6f5c44564593505b865c42281f2da10e Mon Sep 17 00:00:00 2001 From: Guy_Leibu Date: Mon, 17 Nov 2025 14:56:54 +0200 Subject: [PATCH 19/19] Fix after review --- modules/src/{openai_proxy => openai_proxy_app}/item.yaml | 0 .../src/{openai_proxy => openai_proxy_app}/openai_proxy_app.ipynb | 0 .../src/{openai_proxy => openai_proxy_app}/openai_proxy_app.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename modules/src/{openai_proxy => openai_proxy_app}/item.yaml (100%) rename modules/src/{openai_proxy => 
openai_proxy_app}/openai_proxy_app.ipynb (100%) rename modules/src/{openai_proxy => openai_proxy_app}/openai_proxy_app.py (100%) diff --git a/modules/src/openai_proxy/item.yaml b/modules/src/openai_proxy_app/item.yaml similarity index 100% rename from modules/src/openai_proxy/item.yaml rename to modules/src/openai_proxy_app/item.yaml diff --git a/modules/src/openai_proxy/openai_proxy_app.ipynb b/modules/src/openai_proxy_app/openai_proxy_app.ipynb similarity index 100% rename from modules/src/openai_proxy/openai_proxy_app.ipynb rename to modules/src/openai_proxy_app/openai_proxy_app.ipynb diff --git a/modules/src/openai_proxy/openai_proxy_app.py b/modules/src/openai_proxy_app/openai_proxy_app.py similarity index 100% rename from modules/src/openai_proxy/openai_proxy_app.py rename to modules/src/openai_proxy_app/openai_proxy_app.py
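
For reference, a minimal usage sketch of the proxy endpoints introduced in PATCH 01 ("/", "/v1/chat/completions", "/v1/embeddings"), assuming the application has already been deployed (as in the notebook from PATCH 09) and is reachable at a placeholder base URL, and that OPENAI_API_KEY is exported in the calling environment. The base URL, prompt, and printed fields below are illustrative and are not part of the patch series itself.

# Minimal client-side sketch for exercising the deployed proxy.
# Assumptions (not from the patches): the proxy is reachable at BASE_URL below,
# and OPENAI_API_KEY is set; replace BASE_URL with the deployed application URL.
import os

import requests

BASE_URL = "http://localhost:8000"  # placeholder
HEADERS = {
    "Content-Type": "application/json",
    # If this header is omitted, the proxy falls back to its own OPENAI_API_KEY env var.
    "Authorization": f"Bearer {os.getenv('OPENAI_API_KEY', '')}",
}

# Health check against the root endpoint defined in the proxy.
print(requests.get(f"{BASE_URL}/", timeout=10).json())

# Chat completion: "messages" is required by the proxy; "model" is optional and
# defaults to OPENAI_DEFAULT_MODEL on the server side.
chat = requests.post(
    f"{BASE_URL}/v1/chat/completions",
    headers=HEADERS,
    json={"messages": [{"role": "user", "content": "Say hello in one short sentence."}]},
    timeout=60,
)
chat.raise_for_status()
print(chat.json()["choices"][0]["message"]["content"])

# Embeddings: the proxy fills in "text-embedding-3-small" when no model is given.
emb = requests.post(
    f"{BASE_URL}/v1/embeddings",
    headers=HEADERS,
    json={"input": "hello world"},
    timeout=60,
)
emb.raise_for_status()
print(len(emb.json()["data"][0]["embedding"]))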