From 59f8217bedf691657aa1afca4154298963a04663 Mon Sep 17 00:00:00 2001
From: IlyasMoutawwakil
Date: Sat, 13 Apr 2024 14:13:23 +0200
Subject: [PATCH] colored logs

---
 example.py                          |  2 +-
 py_txi/inference_server.py          | 13 +++++++++----
 py_txi/text_embedding_inference.py  |  8 ++++----
 py_txi/text_generation_inference.py |  2 +-
 py_txi/utils.py                     | 33 +++++++++++++++++++--------------
 setup.py                            |  2 +-
 6 files changed, 35 insertions(+), 25 deletions(-)

diff --git a/example.py b/example.py
index de3f57b..afa2d49 100644
--- a/example.py
+++ b/example.py
@@ -10,5 +10,5 @@
 embed = TEI(config=TEIConfig(model_id="BAAI/bge-base-en-v1.5"))
 output = embed.encode(["Hi, I'm an embedding model", "I'm fine, how are you?"])
 print(len(output))
-print("Embed:", output)
+# print("Embed:", output)
 embed.close()
diff --git a/py_txi/inference_server.py b/py_txi/inference_server.py
index d2ba4e5..c436650 100644
--- a/py_txi/inference_server.py
+++ b/py_txi/inference_server.py
@@ -4,17 +4,18 @@
 import time
 from abc import ABC
 from dataclasses import asdict, dataclass, field
-from logging import INFO, basicConfig, getLogger
+from logging import INFO, getLogger
 from typing import Any, Dict, List, Optional, Union
 
+import coloredlogs
 import docker
 import docker.errors
 import docker.types
 from huggingface_hub import AsyncInferenceClient
 
-from .utils import colored_json_logs, get_free_port
+from .utils import get_free_port, styled_logs
 
-basicConfig(level=INFO)
+coloredlogs.install(level=INFO, fmt="[%(asctime)s][%(filename)s][%(levelname)s] %(message)s")
 
 DOCKER = docker.from_env()
 LOGGER = getLogger("Inference-Server")
@@ -55,6 +56,10 @@ def __post_init__(self) -> None:
         LOGGER.info("\t+ Getting a free port for the server")
         self.ports["80/tcp"] = (self.ports["80/tcp"][0], get_free_port())
 
+        if self.shm_size is None:
+            LOGGER.warning("\t+ Shared memory size not provided. Defaulting to '1g'.")
+            self.shm_size = "1g"
+
 
 class InferenceServer(ABC):
     NAME: str = "Inference-Server"
@@ -127,7 +132,7 @@ def __init__(self, config: InferenceServerConfig) -> None:
         LOGGER.info(f"\t+ Streaming {self.NAME} server logs")
         for line in self.container.logs(stream=True):
             log = line.decode("utf-8").strip()
-            log = colored_json_logs(log)
+            log = styled_logs(log)
 
             if self.SUCCESS_SENTINEL.lower() in log.lower():
                 LOGGER.info(f"\t+ {log}")
diff --git a/py_txi/text_embedding_inference.py b/py_txi/text_embedding_inference.py
index 0f1c2d5..f817c16 100644
--- a/py_txi/text_embedding_inference.py
+++ b/py_txi/text_embedding_inference.py
@@ -34,13 +34,13 @@ def __post_init__(self) -> None:
             LOGGER.info("\t+ Using the latest CPU image for Text-Embedding-Inference")
             self.image = "ghcr.io/huggingface/text-embeddings-inference:cpu-latest"
 
+        if is_nvidia_system() and "cpu" in self.image:
+            LOGGER.warning("\t+ You are running on an NVIDIA GPU system but using a CPU image.")
+
         if self.pooling is None:
-            LOGGER.warning("Pooling strategy not provided. Defaulting to 'cls' pooling.")
+            LOGGER.warning("\t+ Pooling strategy not provided. Defaulting to 'cls' pooling.")
             self.pooling = "cls"
 
-        if is_nvidia_system() and "cpu" in self.image:
-            LOGGER.warning("You are running on a NVIDIA GPU system but using a CPU image.")
-
 
 class TEI(InferenceServer):
     NAME: str = "Text-Embedding-Inference"
diff --git a/py_txi/text_generation_inference.py b/py_txi/text_generation_inference.py
index dee0929..e9d5fa5 100644
--- a/py_txi/text_generation_inference.py
+++ b/py_txi/text_generation_inference.py
@@ -43,7 +43,7 @@ def __post_init__(self) -> None:
             )
 
         if is_rocm_system() and "rocm" not in self.image:
-            LOGGER.warning("You are running on a ROCm AMD GPU system but using a non-ROCM image.")
+            LOGGER.warning("\t+ You are running on a ROCm AMD GPU system but using a non-ROCm image.")
 
 
 class TGI(InferenceServer):
diff --git a/py_txi/utils.py b/py_txi/utils.py
index 517db1e..a1cb14c 100644
--- a/py_txi/utils.py
+++ b/py_txi/utils.py
@@ -1,5 +1,6 @@
 import socket
 import subprocess
+from datetime import datetime
 from json import loads
 
 
@@ -25,21 +26,24 @@ def is_nvidia_system() -> bool:
         return False
 
 
-LEVEL_TO_COLOR = {
-    "DEBUG": "0;34m",
-    "INFO": "0;32m",
-    "WARNING": "0;33m",
-    "WARN": "0;33m",
-    "ERROR": "0;31m",
-    "CRITICAL": "0;31m",
+LEVEL_TO_MESSAGE_STYLE = {
+    "DEBUG": "\033[37m",
+    "INFO": "\033[37m",
+    "WARN": "\033[33m",
+    "WARNING": "\033[33m",
+    "ERROR": "\033[31m",
+    "CRITICAL": "\033[31m",
 }
 
+TIMESTAMP_STYLE = "\033[32m"
+TARGET_STYLE = "\033[0;38m"
+LEVEL_STYLE = "\033[1;30m"
 
 def color_text(text: str, color: str) -> str:
-    return f"\033[{color}{text}\033[0m"
+    return f"{color}{text}\033[0m"
 
 
-def colored_json_logs(log: str) -> str:
+def styled_logs(log: str) -> str:
     dict_log = loads(log)
 
     fields = dict_log.get("fields", {})
@@ -47,10 +51,11 @@
     target = dict_log.get("target", "could not parse target")
     timestamp = dict_log.get("timestamp", "could not parse timestamp")
     message = fields.get("message", dict_log.get("message", "could not parse message"))
+    timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y-%m-%d %H:%M:%S")
 
-    color = LEVEL_TO_COLOR.get(level, "0;37m")
+    message = color_text(message, LEVEL_TO_MESSAGE_STYLE.get(level, "\033[37m"))
+    timestamp = color_text(timestamp, TIMESTAMP_STYLE)
+    target = color_text(target, TARGET_STYLE)
+    level = color_text(level, LEVEL_STYLE)
 
-    level = color_text(level, color)
-    message = color_text(message, color)
-
-    return f"[{timestamp}][{level}][{target}] - {message}"
+    return f"[{timestamp}][{target}][{level}] - {message}"
diff --git a/setup.py b/setup.py
index 72077b5..8050050 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
     name="py-txi",
     version=PY_TXI_VERSION,
     packages=find_packages(),
-    install_requires=["docker", "huggingface-hub", "numpy", "aiohttp"],
+    install_requires=["docker", "huggingface-hub", "numpy", "aiohttp", "coloredlogs"],
     extras_require={"quality": ["ruff"], "testing": ["pytest"]},
     **common_setup_kwargs,
 )
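
Note: as a minimal sketch of what the reworked styled_logs helper in this patch does, the standalone snippet below parses one JSON log line in the shape TGI emits, reformats the timestamp, and wraps the message in an ANSI style before printing. The sample log line and the trimmed-down constants are illustrative assumptions, not part of the patch.

    # Illustrative sketch of the styled_logs flow from py_txi/utils.py above.
    # The JSON record is a made-up example of a TGI-style log line.
    from datetime import datetime
    from json import loads

    LEVEL_TO_MESSAGE_STYLE = {"INFO": "\033[37m", "WARNING": "\033[33m", "ERROR": "\033[31m"}
    TIMESTAMP_STYLE = "\033[32m"

    def color_text(text: str, color: str) -> str:
        # wrap the text in the given ANSI style, then reset attributes
        return f"{color}{text}\033[0m"

    raw = '{"timestamp": "2024-04-13T12:13:23.000123Z", "level": "INFO", "target": "text_generation_launcher", "fields": {"message": "Starting download"}}'
    dict_log = loads(raw)

    # reformat the ISO timestamp to a shorter, human-friendly form
    timestamp = datetime.strptime(dict_log["timestamp"], "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y-%m-%d %H:%M:%S")
    # style the message according to its level, falling back to plain white
    message = color_text(dict_log["fields"]["message"], LEVEL_TO_MESSAGE_STYLE.get(dict_log["level"], "\033[37m"))
    print(f"[{color_text(timestamp, TIMESTAMP_STYLE)}][{dict_log['target']}][{dict_log['level']}] - {message}")

Running it prints one line like "[2024-04-13 12:13:23][text_generation_launcher][INFO] - Starting download", with the timestamp in green and the message styled by level, matching the "[timestamp][target][level] - message" layout the patched styled_logs returns.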