From c2cdab29cebb96d6c8533df87f13d98169ea280a Mon Sep 17 00:00:00 2001
From: Yingge He <157551214+yinggeh@users.noreply.github.com>
Date: Fri, 16 Aug 2024 16:22:51 -0700
Subject: [PATCH] feat: Report histogram metrics to Triton metrics server (#58)

---
 README.md                                     |  65 +++++++
 .../metrics_test/vllm_metrics_test.py         | 164 +++++++++++++++++
 src/utils/metrics.py                          | 174 ++++++++++++++++++
 3 files changed, 403 insertions(+)
 create mode 100644 ci/L0_backend_vllm/metrics_test/vllm_metrics_test.py
 create mode 100644 src/utils/metrics.py

diff --git a/README.md b/README.md
index 13953f58..949c62d3 100644
--- a/README.md
+++ b/README.md
@@ -202,6 +202,71 @@ you need to specify a different `shm-region-prefix-name` for each server. See
 [here](https://github.com/triton-inference-server/python_backend#running-multiple-instances-of-triton-server)
 for more information.
 
+## Triton Metrics
+Starting with the 24.08 release of Triton, users can now obtain specific
+vLLM metrics by querying the Triton metrics endpoint (see the complete list of
+vLLM metrics [here](https://docs.vllm.ai/en/latest/serving/metrics.html)).
+To do so, launch a Triton server in any of the ways described above
+(ensuring the build code / container is 24.08 or later) and, once the server
+is up, query the metrics endpoint:
+```bash
+curl localhost:8002/metrics
+```
+vLLM stats are reported by the metrics endpoint in fields that are prefixed with
+`vllm:`. Triton currently supports reporting of the following metrics from vLLM.
+```bash
+# Number of prefill tokens processed.
+counter_prompt_tokens
+# Number of generation tokens processed.
+counter_generation_tokens
+# Histogram of time to first token in seconds.
+histogram_time_to_first_token
+# Histogram of time per output token in seconds.
+histogram_time_per_output_token
+```
+Your output for these fields should look similar to the following:
+```bash
+# HELP vllm:prompt_tokens_total Number of prefill tokens processed.
+# TYPE vllm:prompt_tokens_total counter
+vllm:prompt_tokens_total{model="vllm_model",version="1"} 10
+# HELP vllm:generation_tokens_total Number of generation tokens processed.
+# TYPE vllm:generation_tokens_total counter
+vllm:generation_tokens_total{model="vllm_model",version="1"} 16
+# HELP vllm:time_to_first_token_seconds Histogram of time to first token in seconds.
+# TYPE vllm:time_to_first_token_seconds histogram
+vllm:time_to_first_token_seconds_count{model="vllm_model",version="1"} 1
+vllm:time_to_first_token_seconds_sum{model="vllm_model",version="1"} 0.03233122825622559
+vllm:time_to_first_token_seconds_bucket{model="vllm_model",version="1",le="0.001"} 0
+vllm:time_to_first_token_seconds_bucket{model="vllm_model",version="1",le="0.005"} 0
+...
+vllm:time_to_first_token_seconds_bucket{model="vllm_model",version="1",le="+Inf"} 1
+# HELP vllm:time_per_output_token_seconds Histogram of time per output token in seconds.
+# TYPE vllm:time_per_output_token_seconds histogram
+vllm:time_per_output_token_seconds_count{model="vllm_model",version="1"} 15
+vllm:time_per_output_token_seconds_sum{model="vllm_model",version="1"} 0.04501533508300781
+vllm:time_per_output_token_seconds_bucket{model="vllm_model",version="1",le="0.01"} 14
+vllm:time_per_output_token_seconds_bucket{model="vllm_model",version="1",le="0.025"} 15
+...
+vllm:time_per_output_token_seconds_bucket{model="vllm_model",version="1",le="+Inf"} 15
+```
+To enable the vLLM engine to collect metrics, the "disable_log_stats" option needs to be
+either set to false or left empty (false by default) in
+[model.json](https://github.com/triton-inference-server/vllm_backend/blob/main/samples/model_repository/vllm_model/1/model.json).
+```bash
+"disable_log_stats": false
+```
+*Note:* vLLM metrics are not reported to the Triton metrics server by default
+due to potential performance slowdowns. To enable metrics reporting for a vLLM
+model, please add the following lines to its config.pbtxt as well.
+```bash
+parameters: {
+  key: "REPORT_CUSTOM_METRICS"
+  value: {
+    string_value:"yes"
+  }
+}
+```
+
 ## Referencing the Tutorial
 
 You can read further in the
diff --git a/ci/L0_backend_vllm/metrics_test/vllm_metrics_test.py b/ci/L0_backend_vllm/metrics_test/vllm_metrics_test.py
new file mode 100644
index 00000000..db72a57a
--- /dev/null
+++ b/ci/L0_backend_vllm/metrics_test/vllm_metrics_test.py
@@ -0,0 +1,164 @@
+# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#  * Neither the name of NVIDIA CORPORATION nor the names of its
+#    contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import sys
+import unittest
+from functools import partial
+
+import requests
+import tritonclient.grpc as grpcclient
+from tritonclient.utils import *
+
+sys.path.append("../../common")
+from test_util import TestResultCollector, UserData, callback, create_vllm_request
+
+
+class VLLMTritonMetricsTest(TestResultCollector):
+    def setUp(self):
+        self.triton_client = grpcclient.InferenceServerClient(url="localhost:8001")
+        self.tritonserver_ipaddr = os.environ.get("TRITONSERVER_IPADDR", "localhost")
+        self.vllm_model_name = "vllm_opt"
+        self.prompts = [
+            "The most dangerous animal is",
+            "The capital of France is",
+            "The future of AI is",
+        ]
+        self.sampling_parameters = {"temperature": "0", "top_p": "1"}
+
+    def get_vllm_metrics(self):
+        """
+        Fetch the Triton metrics endpoint and store the vLLM metrics in a dictionary.
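+
+        Example of a metric line parsed from the endpoint output:
+            vllm:prompt_tokens_total{model="vllm_opt",version="1"} 18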
+ """ + r = requests.get(f"http://{self.tritonserver_ipaddr}:8002/metrics") + r.raise_for_status() + + # Regular expression to match the pattern + pattern = r"^(vllm:[^ {]+)(?:{.*})? ([0-9.-]+)$" + vllm_dict = {} + + # Find all matches in the text + matches = re.findall(pattern, r.text, re.MULTILINE) + + for match in matches: + key, value = match + vllm_dict[key] = float(value) if "." in value else int(value) + + return vllm_dict + + def vllm_infer( + self, + prompts, + sampling_parameters, + model_name, + ): + """ + Helper function to send async stream infer requests to vLLM. + """ + user_data = UserData() + number_of_vllm_reqs = len(prompts) + + self.triton_client.start_stream(callback=partial(callback, user_data)) + for i in range(number_of_vllm_reqs): + request_data = create_vllm_request( + prompts[i], + i, + False, + sampling_parameters, + model_name, + True, + ) + self.triton_client.async_stream_infer( + model_name=model_name, + inputs=request_data["inputs"], + request_id=request_data["request_id"], + outputs=request_data["outputs"], + parameters=sampling_parameters, + ) + + for _ in range(number_of_vllm_reqs): + result = user_data._completed_requests.get() + if type(result) is InferenceServerException: + print(result.message()) + self.assertIsNot(type(result), InferenceServerException, str(result)) + + output = result.as_numpy("text_output") + self.assertIsNotNone(output, "`text_output` should not be None") + + self.triton_client.stop_stream() + + def test_vllm_metrics(self): + # Test vLLM metrics + self.vllm_infer( + prompts=self.prompts, + sampling_parameters=self.sampling_parameters, + model_name=self.vllm_model_name, + ) + metrics_dict = self.get_vllm_metrics() + + # vllm:prompt_tokens_total + self.assertEqual(metrics_dict["vllm:prompt_tokens_total"], 18) + # vllm:generation_tokens_total + self.assertEqual(metrics_dict["vllm:generation_tokens_total"], 48) + + # vllm:time_to_first_token_seconds + self.assertEqual(metrics_dict["vllm:time_to_first_token_seconds_count"], 3) + self.assertGreater(metrics_dict["vllm:time_to_first_token_seconds_sum"], 0) + self.assertEqual(metrics_dict["vllm:time_to_first_token_seconds_bucket"], 3) + # vllm:time_per_output_token_seconds + self.assertEqual(metrics_dict["vllm:time_per_output_token_seconds_count"], 45) + self.assertGreater(metrics_dict["vllm:time_per_output_token_seconds_sum"], 0) + self.assertEqual(metrics_dict["vllm:time_per_output_token_seconds_bucket"], 45) + + def test_vllm_metrics_disabled(self): + # Test vLLM metrics + self.vllm_infer( + prompts=self.prompts, + sampling_parameters=self.sampling_parameters, + model_name=self.vllm_model_name, + ) + metrics_dict = self.get_vllm_metrics() + + # No vLLM metric found + self.assertEqual(len(metrics_dict), 0) + + def test_vllm_metrics_refused(self): + # Test vLLM metrics + self.vllm_infer( + prompts=self.prompts, + sampling_parameters=self.sampling_parameters, + model_name=self.vllm_model_name, + ) + with self.assertRaises(requests.exceptions.ConnectionError): + self.get_vllm_metrics() + + def tearDown(self): + self.triton_client.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/src/utils/metrics.py b/src/utils/metrics.py new file mode 100644 index 00000000..5f007b02 --- /dev/null +++ b/src/utils/metrics.py @@ -0,0 +1,174 @@ +# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of NVIDIA CORPORATION nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from typing import Dict, List, Union + +import triton_python_backend_utils as pb_utils +from vllm.engine.metrics import StatLoggerBase as VllmStatLoggerBase +from vllm.engine.metrics import Stats as VllmStats +from vllm.engine.metrics import SupportsMetricsInfo + + +class TritonMetrics: + def __init__(self, labels): + # Initialize metric families + # Iteration stats + self.counter_prompt_tokens_family = pb_utils.MetricFamily( + name="vllm:prompt_tokens_total", + description="Number of prefill tokens processed.", + kind=pb_utils.MetricFamily.COUNTER, + ) + self.counter_generation_tokens_family = pb_utils.MetricFamily( + name="vllm:generation_tokens_total", + description="Number of generation tokens processed.", + kind=pb_utils.MetricFamily.COUNTER, + ) + self.histogram_time_to_first_token_family = pb_utils.MetricFamily( + name="vllm:time_to_first_token_seconds", + description="Histogram of time to first token in seconds.", + kind=pb_utils.MetricFamily.HISTOGRAM, + ) + self.histogram_time_per_output_token_family = pb_utils.MetricFamily( + name="vllm:time_per_output_token_seconds", + description="Histogram of time per output token in seconds.", + kind=pb_utils.MetricFamily.HISTOGRAM, + ) + + # Initialize metrics + # Iteration stats + self.counter_prompt_tokens = self.counter_prompt_tokens_family.Metric( + labels=labels + ) + self.counter_generation_tokens = self.counter_generation_tokens_family.Metric( + labels=labels + ) + # Use the same bucket boundaries from vLLM sample metrics. 
+ # https://github.com/vllm-project/vllm/blob/21313e09e3f9448817016290da20d0db1adf3664/vllm/engine/metrics.py#L81-L96 + self.histogram_time_to_first_token = ( + self.histogram_time_to_first_token_family.Metric( + labels=labels, + buckets=[ + 0.001, + 0.005, + 0.01, + 0.02, + 0.04, + 0.06, + 0.08, + 0.1, + 0.25, + 0.5, + 0.75, + 1.0, + 2.5, + 5.0, + 7.5, + 10.0, + ], + ) + ) + self.histogram_time_per_output_token = ( + self.histogram_time_per_output_token_family.Metric( + labels=labels, + buckets=[ + 0.01, + 0.025, + 0.05, + 0.075, + 0.1, + 0.15, + 0.2, + 0.3, + 0.4, + 0.5, + 0.75, + 1.0, + 2.5, + ], + ) + ) + + +class VllmStatLogger(VllmStatLoggerBase): + """StatLogger is used as an adapter between vLLM stats collector and Triton metrics provider.""" + + # local_interval not used here. It's for vLLM logs to stdout. + def __init__(self, labels: Dict, local_interval: float = 0) -> None: + # Tracked stats over current local logging interval. + super().__init__(local_interval) + self.metrics = TritonMetrics(labels=labels) + + def info(self, type: str, obj: SupportsMetricsInfo) -> None: + pass + + def _log_counter(self, counter, data: Union[int, float]) -> None: + """Convenience function for logging to counter. + + Args: + counter: A counter metric instance. + data: An int or float to increment the count metric. + + Returns: + None + """ + if data != 0: + counter.increment(data) + + def _log_histogram(self, histogram, data: Union[List[int], List[float]]) -> None: + """Convenience function for logging list to histogram. + + Args: + histogram: A histogram metric instance. + data: A list of int or float data to observe into the histogram metric. + + Returns: + None + """ + for datum in data: + histogram.observe(datum) + + def log(self, stats: VllmStats) -> None: + """Report stats to Triton metrics server. + + Args: + stats: Created by LLMEngine for use by VllmStatLogger. + + Returns: + None + """ + self._log_counter( + self.metrics.counter_prompt_tokens, stats.num_prompt_tokens_iter + ) + self._log_counter( + self.metrics.counter_generation_tokens, stats.num_generation_tokens_iter + ) + self._log_histogram( + self.metrics.histogram_time_to_first_token, stats.time_to_first_tokens_iter + ) + self._log_histogram( + self.metrics.histogram_time_per_output_token, + stats.time_per_output_tokens_iter, + )
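
The patch above defines the Triton metric families and the `VllmStatLogger` adapter, but it does not show how the logger gets attached to a running engine. The snippet below is a minimal, hypothetical sketch of that wiring, for orientation only: it is not part of this patch, the `utils.metrics` import path, model name, and label values are assumptions, it can only run inside the Triton Python backend process (where `triton_python_backend_utils` exists), and it assumes the vLLM engine exposes an `add_logger()` hook for custom `StatLoggerBase` implementations, which recent vLLM releases provide.

```python
# Hypothetical wiring sketch -- not part of this patch.
# Assumes vLLM's LLMEngine exposes add_logger() for custom StatLoggerBase
# implementations, and that this file is importable as utils.metrics inside
# the Triton Python backend (pb_utils only exists in that process).
from vllm import EngineArgs, LLMEngine

from utils.metrics import VllmStatLogger

# Stats are only produced when "disable_log_stats" is false.
engine = LLMEngine.from_engine_args(
    EngineArgs(model="facebook/opt-125m", disable_log_stats=False)
)

# These labels become the {model=...,version=...} labels on every reported metric.
stat_logger = VllmStatLogger(labels={"model": "vllm_model", "version": "1"})
engine.add_logger("triton", stat_logger)

# From here on, each engine step's stats reach VllmStatLogger.log(), which
# updates the token counters and the TTFT / time-per-output-token histograms
# exposed on Triton's /metrics endpoint.
```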