diff --git a/docs/my-website/docs/proxy/debugging.md b/docs/my-website/docs/proxy/debugging.md
index c5653d90f712..b9f2ba8da981 100644
--- a/docs/my-website/docs/proxy/debugging.md
+++ b/docs/my-website/docs/proxy/debugging.md
@@ -5,6 +5,8 @@
 - debug (prints info logs)
 - detailed debug (prints debug logs)
 
+The proxy also supports json logs. [See here](#json-logs)
+
 ## `debug`
 
 **via cli**
@@ -31,4 +33,20 @@ $ litellm --detailed_debug
 
 ```python
 os.environ["LITELLM_LOG"] = "DEBUG"
-```
\ No newline at end of file
+```
+
+## JSON LOGS
+
+Set `JSON_LOGS="True"` in your env:
+
+```bash
+export JSON_LOGS="True"
+```
+
+Start proxy
+
+```bash
+$ litellm
+```
+
+The proxy will now print all logs in json format.
\ No newline at end of file
diff --git a/litellm/_logging.py b/litellm/_logging.py
index f31ee41f8bf7..0759ad51e959 100644
--- a/litellm/_logging.py
+++ b/litellm/_logging.py
@@ -1,19 +1,33 @@
-import logging
+import logging, os, json
+from logging import Formatter
 
 set_verbose = False
-json_logs = False
+json_logs = bool(os.getenv("JSON_LOGS", False))
 
 # Create a handler for the logger (you may need to adapt this based on your needs)
 handler = logging.StreamHandler()
 handler.setLevel(logging.DEBUG)
 
-# Create a formatter and set it for the handler
-formatter = logging.Formatter(
-    "\033[92m%(asctime)s - %(name)s:%(levelname)s\033[0m: %(filename)s:%(lineno)s - %(message)s",
-    datefmt="%H:%M:%S",
-)
+class JsonFormatter(Formatter):
+    def __init__(self):
+        super(JsonFormatter, self).__init__()
+
+    def format(self, record):
+        json_record = {}
+        json_record["message"] = record.getMessage()
+        return json.dumps(json_record)
+
+
+# Create a formatter and set it for the handler
+if json_logs:
+    handler.setFormatter(JsonFormatter())
+else:
+    formatter = logging.Formatter(
+        "\033[92m%(asctime)s - %(name)s:%(levelname)s\033[0m: %(filename)s:%(lineno)s - %(message)s",
+        datefmt="%H:%M:%S",
+    )
 
-handler.setFormatter(formatter)
+    handler.setFormatter(formatter)
 
 verbose_proxy_logger = logging.getLogger("LiteLLM Proxy")
 verbose_router_logger = logging.getLogger("LiteLLM Router")
diff --git a/litellm/proxy/_logging.py b/litellm/proxy/_logging.py
new file mode 100644
index 000000000000..fcabad7cd60a
--- /dev/null
+++ b/litellm/proxy/_logging.py
@@ -0,0 +1,20 @@
+import json
+import logging
+from logging import Formatter
+
+
+class JsonFormatter(Formatter):
+    def __init__(self):
+        super(JsonFormatter, self).__init__()
+
+    def format(self, record):
+        json_record = {}
+        json_record["message"] = record.getMessage()
+        return json.dumps(json_record)
+
+
+logger = logging.root
+handler = logging.StreamHandler()
+handler.setFormatter(JsonFormatter())
+logger.handlers = [handler]
+logger.setLevel(logging.DEBUG)
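For reference, a minimal sketch (not part of the patch) of what the `JsonFormatter` introduced above emits. The formatter body is adapted from the diff; the logger name and wiring below are illustrative only:

```python
import json
import logging
from logging import Formatter


class JsonFormatter(Formatter):
    def format(self, record):
        # Only the rendered message is kept; level, timestamp, etc. are dropped.
        return json.dumps({"message": record.getMessage()})


handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())

demo_logger = logging.getLogger("LiteLLM Proxy")  # same name as the logger created in litellm/_logging.py
demo_logger.addHandler(handler)
demo_logger.setLevel(logging.DEBUG)

demo_logger.info("Initialized proxy")
# expected output: {"message": "Initialized proxy"}
```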
diff --git a/litellm/proxy/_super_secret_config.yaml b/litellm/proxy/_super_secret_config.yaml
index f349bd09e9ab..2195a077d362 100644
--- a/litellm/proxy/_super_secret_config.yaml
+++ b/litellm/proxy/_super_secret_config.yaml
@@ -17,4 +17,4 @@ model_list:
     api_key: os.environ/AZURE_API_KEY # The `os.environ/` prefix tells litellm to read this from the env. See https://docs.litellm.ai/docs/simple_proxy#load-api-keys-from-vault
 
 router_settings:
-  enable_pre_call_checks: true
+  enable_pre_call_checks: true
\ No newline at end of file
diff --git a/litellm/proxy/proxy_cli.py b/litellm/proxy/proxy_cli.py
index 50eca5ecb3a5..537f43736405 100644
--- a/litellm/proxy/proxy_cli.py
+++ b/litellm/proxy/proxy_cli.py
@@ -17,6 +17,7 @@
 from importlib import resources
 import shutil
 
+
 telemetry = None
 
 
@@ -505,6 +506,7 @@ def _make_openai_completion():
         port = random.randint(1024, 49152)
 
     from litellm.proxy.proxy_server import app
+    import litellm
 
     if run_gunicorn == False:
         if ssl_certfile_path is not None and ssl_keyfile_path is not None:
@@ -519,7 +521,15 @@ def _make_openai_completion():
                 ssl_certfile=ssl_certfile_path,
             )  # run uvicorn
         else:
-            uvicorn.run(app, host=host, port=port)  # run uvicorn
+            print(f"litellm.json_logs: {litellm.json_logs}")
+            if litellm.json_logs:
+                from litellm.proxy._logging import logger
+
+                uvicorn.run(
+                    app, host=host, port=port, log_config=None
+                )  # run uvicorn w/ json
+            else:
+                uvicorn.run(app, host=host, port=port)  # run uvicorn
     elif run_gunicorn == True:
        import gunicorn.app.base
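A sketch of how these pieces are expected to fit together when `JSON_LOGS` is set, assuming `litellm`, `uvicorn`, and `fastapi` are installed; the app, host, and port below are illustrative and not taken from the proxy:

```python
import uvicorn
from fastapi import FastAPI

# Importing this module (as proxy_cli.py now does) replaces the root logger's
# handlers with a StreamHandler that uses the JSON formatter.
from litellm.proxy._logging import logger  # noqa: F401

app = FastAPI()

if __name__ == "__main__":
    # log_config=None keeps uvicorn from installing its default logging config,
    # so its startup/access logs propagate to the root JSON handler instead.
    uvicorn.run(app, host="127.0.0.1", port=8000, log_config=None)
```

Passing `log_config=None` is what lets uvicorn's own log records flow through the root handler configured in `litellm/proxy/_logging.py` rather than uvicorn's default formatters.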