Commit

colored logs
IlyasMoutawwakil committed Apr 13, 2024
1 parent f39e02c commit 59f8217
Showing 6 changed files with 35 additions and 25 deletions.
2 changes: 1 addition & 1 deletion example.py
@@ -10,5 +10,5 @@
 embed = TEI(config=TEIConfig(model_id="BAAI/bge-base-en-v1.5"))
 output = embed.encode(["Hi, I'm an embedding model", "I'm fine, how are you?"])
 print(len(output))
-print("Embed:", output)
+# print("Embed:", output)
 embed.close()
13 changes: 9 additions & 4 deletions py_txi/inference_server.py
@@ -4,17 +4,18 @@
 import time
 from abc import ABC
 from dataclasses import asdict, dataclass, field
-from logging import INFO, basicConfig, getLogger
+from logging import INFO, getLogger
 from typing import Any, Dict, List, Optional, Union
 
+import coloredlogs
 import docker
 import docker.errors
 import docker.types
 from huggingface_hub import AsyncInferenceClient
 
-from .utils import colored_json_logs, get_free_port
+from .utils import get_free_port, styled_logs
 
-basicConfig(level=INFO)
+coloredlogs.install(level=INFO, fmt="[%(asctime)s][%(filename)s][%(levelname)s] %(message)s")
 
 DOCKER = docker.from_env()
 LOGGER = getLogger("Inference-Server")
@@ -55,6 +56,10 @@ def __post_init__(self) -> None:
         LOGGER.info("\t+ Getting a free port for the server")
         self.ports["80/tcp"] = (self.ports["80/tcp"][0], get_free_port())
 
+        if self.shm_size is None:
+            LOGGER.warning("\t+ Shared memory size not provided. Defaulting to '1g'.")
+            self.shm_size = "1g"
+
 
 class InferenceServer(ABC):
     NAME: str = "Inference-Server"
@@ -127,7 +132,7 @@ def __init__(self, config: InferenceServerConfig) -> None:
         LOGGER.info(f"\t+ Streaming {self.NAME} server logs")
         for line in self.container.logs(stream=True):
             log = line.decode("utf-8").strip()
-            log = colored_json_logs(log)
+            log = styled_logs(log)
 
             if self.SUCCESS_SENTINEL.lower() in log.lower():
                 LOGGER.info(f"\t+ {log}")
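On the host side, this commit swaps logging.basicConfig for coloredlogs.install, so records emitted by py-txi itself are colorized when written to a terminal; container logs are styled separately via styled_logs in py_txi/utils.py. A minimal, self-contained sketch of the same setup, assuming only the coloredlogs package from PyPI:

```python
# Minimal sketch of the host-side logging setup this commit switches to:
# coloredlogs.install replaces logging.basicConfig and colorizes records
# per level on a TTY, using the same format string as the diff above.
from logging import INFO, getLogger

import coloredlogs

coloredlogs.install(level=INFO, fmt="[%(asctime)s][%(filename)s][%(levelname)s] %(message)s")

LOGGER = getLogger("Inference-Server")
LOGGER.info("\t+ Getting a free port for the server")  # styled per level on a TTY
LOGGER.warning("\t+ Shared memory size not provided. Defaulting to '1g'.")
```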
8 changes: 4 additions & 4 deletions py_txi/text_embedding_inference.py
@@ -34,13 +34,13 @@ def __post_init__(self) -> None:
             LOGGER.info("\t+ Using the latest CPU image for Text-Embedding-Inference")
             self.image = "ghcr.io/huggingface/text-embeddings-inference:cpu-latest"
 
+        if is_nvidia_system() and "cpu" in self.image:
+            LOGGER.warning("\t+ You are running on a NVIDIA GPU system but using a CPU image.")
+
         if self.pooling is None:
-            LOGGER.warning("Pooling strategy not provided. Defaulting to 'cls' pooling.")
+            LOGGER.warning("\t+ Pooling strategy not provided. Defaulting to 'cls' pooling.")
             self.pooling = "cls"
 
-        if is_nvidia_system() and "cpu" in self.image:
-            LOGGER.warning("You are running on a NVIDIA GPU system but using a CPU image.")
-
 
 class TEI(InferenceServer):
     NAME: str = "Text-Embedding-Inference"
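The reordered __post_init__ above checks for a CPU image on NVIDIA hardware before applying the pooling fallback. The warn-then-default pattern is the same one this commit adds for shm_size in inference_server.py; here is a hypothetical standalone sketch of it (DemoConfig is illustrative only, not py-txi's actual TEIConfig):

```python
# Hypothetical sketch of the warn-then-default pattern used in the hunk above;
# DemoConfig is illustrative, not the library's actual config class.
from dataclasses import dataclass
from logging import getLogger
from typing import Optional

LOGGER = getLogger("Text-Embedding-Inference")


@dataclass
class DemoConfig:
    pooling: Optional[str] = None

    def __post_init__(self) -> None:
        if self.pooling is None:
            LOGGER.warning("\t+ Pooling strategy not provided. Defaulting to 'cls' pooling.")
            self.pooling = "cls"


config = DemoConfig()  # logs the warning, then config.pooling == "cls"
```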
2 changes: 1 addition & 1 deletion py_txi/text_generation_inference.py
@@ -43,7 +43,7 @@ def __post_init__(self) -> None:
             )
 
         if is_rocm_system() and "rocm" not in self.image:
-            LOGGER.warning("You are running on a ROCm AMD GPU system but using a non-ROCM image.")
+            LOGGER.warning("\t+ You are running on a ROCm AMD GPU system but using a non-ROCM image.")
 
 
 class TGI(InferenceServer):
33 changes: 19 additions & 14 deletions py_txi/utils.py
@@ -1,5 +1,6 @@
 import socket
 import subprocess
+from datetime import datetime
 from json import loads
 
 
@@ -25,32 +26,36 @@ def is_nvidia_system() -> bool:
     return False
 
 
-LEVEL_TO_COLOR = {
-    "DEBUG": "0;34m",
-    "INFO": "0;32m",
-    "WARNING": "0;33m",
-    "WARN": "0;33m",
-    "ERROR": "0;31m",
-    "CRITICAL": "0;31m",
+LEVEL_TO_MESSAGE_STYLE = {
+    "DEBUG": "\033[37m",
+    "INFO": "\033[37m",
+    "WARN": "\033[33m",
+    "WARNING": "\033[33m",
+    "ERROR": "\033[31m",
+    "CRITICAL": "\033[31m",
 }
+TIMESTAMP_STYLE = "\033[32m"
+TARGET_STYLE = "\033[0;38"
+LEVEL_STYLE = "\033[1;30m"
 
 
 def color_text(text: str, color: str) -> str:
-    return f"\033[{color}{text}\033[0m"
+    return f"{color}{text}\033[0m"
 
 
-def colored_json_logs(log: str) -> str:
+def styled_logs(log: str) -> str:
     dict_log = loads(log)
 
     fields = dict_log.get("fields", {})
     level = dict_log.get("level", "could not parse level")
     target = dict_log.get("target", "could not parse target")
    timestamp = dict_log.get("timestamp", "could not parse timestamp")
     message = fields.get("message", dict_log.get("message", "could not parse message"))
+    timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y-%m-%d %H:%M:%S")
 
-    color = LEVEL_TO_COLOR.get(level, "0;37m")
+    message = color_text(message, LEVEL_TO_MESSAGE_STYLE.get(level, "\033[37m"))
+    timestamp = color_text(timestamp, TIMESTAMP_STYLE)
+    target = color_text(target, TARGET_STYLE)
+    level = color_text(level, LEVEL_STYLE)
 
-    level = color_text(level, color)
-    message = color_text(message, color)
-
-    return f"[{timestamp}][{level}][{target}] - {message}"
+    return f"[{timestamp}][{target}][{level}] - {message}"
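The rewritten helper parses the JSON log lines streamed from the TGI/TEI containers, normalizes the timestamp, and styles each field independently instead of coloring the whole line by level. A quick illustration of the resulting format (the JSON payload is a fabricated record shaped like the {timestamp, level, target, fields.message} objects the function expects, not a captured server log):

```python
# Illustrative call to the new styled_logs helper; the JSON below is a
# made-up example matching the fields the function parses.
from py_txi.utils import styled_logs

sample = (
    '{"timestamp": "2024-04-13T12:00:00.123456Z", "level": "INFO",'
    ' "target": "text_generation_router", "fields": {"message": "Connected"}}'
)
print(styled_logs(sample))
# Prints (with ANSI styling wrapped around each bracketed field):
# [2024-04-13 12:00:00][text_generation_router][INFO] - Connected
```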
2 changes: 1 addition & 1 deletion setup.py
@@ -24,7 +24,7 @@
     name="py-txi",
     version=PY_TXI_VERSION,
     packages=find_packages(),
-    install_requires=["docker", "huggingface-hub", "numpy", "aiohttp"],
+    install_requires=["docker", "huggingface-hub", "numpy", "aiohttp", "coloredlogs"],
     extras_require={"quality": ["ruff"], "testing": ["pytest"]},
     **common_setup_kwargs,
 )
