diff --git a/src/c++/perf_analyzer/genai-perf/README.md b/src/c++/perf_analyzer/genai-perf/README.md
index 53e510541..68b0f2166 100644
--- a/src/c++/perf_analyzer/genai-perf/README.md
+++ b/src/c++/perf_analyzer/genai-perf/README.md
@@ -523,8 +523,9 @@ An option to enable the generation of plots. (default: False)
 
 ##### `--profile-export-file <path>`
 
 The path where the perf_analyzer profile export will be generated. By default,
-the profile export will be to `profile_export.json`. The genai-perf file will be
-exported to `<profile_export_file>_genai_perf.csv`. For example, if the profile
+the profile export will be to `profile_export.json`. The genai-perf files will be
+exported to `<profile_export_file>_genai_perf.json` and
+`<profile_export_file>_genai_perf.csv`. For example, if the profile
 export file is `profile_export.json`, the genai-perf file will be exported to
 `profile_export_genai_perf.csv`. (default: `profile_export.json`)
diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/csv_exporter.py b/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/csv_exporter.py
index efbb9b754..27a963034 100644
--- a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/csv_exporter.py
+++ b/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/csv_exporter.py
@@ -30,8 +30,6 @@
 import genai_perf.logging as logging
 from genai_perf.export_data.exporter_config import ExporterConfig
 
-DEFAULT_OUTPUT_DATA_CSV = "profile_export_genai_perf.csv"
-
 logger = logging.getLogger(__name__)
 
@@ -65,14 +63,16 @@ def __init__(self, config: ExporterConfig):
         self._args = config.args
 
     def export(self) -> None:
-        csv_filename = self._output_dir / DEFAULT_OUTPUT_DATA_CSV
-        logger.info(f"Generating {csv_filename}")
-
-        with open(csv_filename, mode="w", newline="") as csvfile:
-            csv_writer = csv.writer(csvfile)
-            self._write_request_metrics(csv_writer)
-            csv_writer.writerow([])
-            self._write_system_metrics(csv_writer)
+        filename = (
+            self._output_dir / f"{self._args.profile_export_file.stem}_genai_perf.csv"
+        )
+        logger.info(f"Generating {filename}")
+
+        with open(filename, mode="w", newline="") as f:
+            writer = csv.writer(f)
+            self._write_request_metrics(writer)
+            writer.writerow([])
+            self._write_system_metrics(writer)
 
     def _write_request_metrics(self, csv_writer) -> None:
         csv_writer.writerow(self.REQUEST_METRICS_HEADER)
diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/json_exporter.py b/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/json_exporter.py
index 2ec24fae1..e1396b3a2 100644
--- a/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/json_exporter.py
+++ b/src/c++/perf_analyzer/genai-perf/genai_perf/export_data/json_exporter.py
@@ -26,14 +26,13 @@
 
 import json
+import os
 from enum import Enum
 from typing import Dict
 
 import genai_perf.logging as logging
 from genai_perf.export_data.exporter_config import ExporterConfig
 
-DEFAULT_OUTPUT_DATA_JSON = "profile_export_genai_perf.json"
-
 logger = logging.getLogger(__name__)
 
@@ -52,7 +51,10 @@ def __init__(self, config: ExporterConfig):
         self._merge_stats_and_args()
 
     def export(self) -> None:
-        filename = self._output_dir / DEFAULT_OUTPUT_DATA_JSON
+        prefix = os.path.splitext(os.path.basename(self._args["profile_export_file"]))[
+            0
+        ]
+        filename = self._output_dir / f"{prefix}_genai_perf.json"
         logger.info(f"Generating {filename}")
         with open(str(filename), "w") as f:
             f.write(json.dumps(self._stats_and_args, indent=2))
diff --git a/src/c++/perf_analyzer/genai-perf/genai_perf/parser.py b/src/c++/perf_analyzer/genai-perf/genai_perf/parser.py
index 776535d15..bae4c516b 100644
--- a/src/c++/perf_analyzer/genai-perf/genai_perf/parser.py
+++ b/src/c++/perf_analyzer/genai-perf/genai_perf/parser.py
@@ -626,8 +626,9 @@ def _add_output_args(parser):
         default=Path("profile_export.json"),
         help="The path where the perf_analyzer profile export will be "
         "generated. By default, the profile export will be to profile_export.json. "
-        "The genai-perf file will be exported to <profile_export_file>_genai_perf.csv. "
-        "For example, if the profile export file is profile_export.json, the genai-perf file will be "
+        "The genai-perf files will be exported to <profile_export_file>_genai_perf.json and "
+        "<profile_export_file>_genai_perf.csv. "
+        "For example, if the profile export file is profile_export.json, the genai-perf CSV file will be "
         "exported to profile_export_genai_perf.csv.",
     )
diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_csv_exporter.py b/src/c++/perf_analyzer/genai-perf/tests/test_csv_exporter.py
index 6a60bc2dc..95bbfe254 100644
--- a/src/c++/perf_analyzer/genai-perf/tests/test_csv_exporter.py
+++ b/src/c++/perf_analyzer/genai-perf/tests/test_csv_exporter.py
@@ -24,9 +24,10 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import os
 from io import StringIO
 from pathlib import Path
-from typing import Any, List
+from typing import Any, List, Tuple
 
 import pytest
 from genai_perf import parser
@@ -37,26 +38,22 @@ class TestCsvExporter:
     @pytest.fixture
-    def mock_read_write(self, monkeypatch: pytest.MonkeyPatch) -> List[str]:
+    def mock_read_write(self, monkeypatch: pytest.MonkeyPatch) -> List[Tuple[str, str]]:
         """
         This function will mock the open function for specific files.
         """
 
         written_data = []
 
-        original_open = open
-
         def custom_open(filename, *args, **kwargs):
             def write(self: Any, content: str) -> int:
-                written_data.append(content)
+                print(f"Writing to {filename}")  # To help with debugging failures
+                written_data.append((str(filename), content))
                 return len(content)
 
-            if str(filename) == "profile_export_genai_perf.csv":
-                tmp_file = StringIO()
-                tmp_file.write = write.__get__(tmp_file)
-                return tmp_file
-            else:
-                return original_open(filename, *args, **kwargs)
+            tmp_file = StringIO()
+            tmp_file.write = write.__get__(tmp_file)
+            return tmp_file
 
         monkeypatch.setattr("builtins.open", custom_open)
 
         return written_data
@@ -115,7 +112,16 @@ def test_streaming_llm_csv_output(
             "Output Token Throughput (per sec),456.00\r\n",
             "Request Throughput (per sec),123.00\r\n",
         ]
-        returned_data = mock_read_write
+        expected_filename = "profile_export_genai_perf.csv"
+        returned_data = [
+            data
+            for filename, data in mock_read_write
+            if os.path.basename(filename) == expected_filename
+        ]
+        if returned_data == []:
+            raise Exception(
+                f"Expected file {expected_filename} not found in written data."
+            )
         assert returned_data == expected_content
 
     def test_nonstreaming_llm_csv_output(
@@ -125,6 +131,9 @@ def test_nonstreaming_llm_csv_output(
         """
         Collect LLM metrics from profile export data and confirm correct values are
         printed in csv.
""" + artifacts_dir = "artifacts/model_name-openai-chat-concurrency1" + custom_filename = "custom_export.json" + expected_filename = f"custom_export_genai_perf.csv" argv = [ "genai-perf", "profile", @@ -134,6 +143,8 @@ def test_nonstreaming_llm_csv_output( "openai", "--endpoint-type", "chat", + "--profile-export-file", + custom_filename, ] monkeypatch.setattr("sys.argv", argv) args, _ = parser.parse_args() @@ -168,7 +179,13 @@ def test_nonstreaming_llm_csv_output( "Output Token Throughput (per sec),456.00\r\n", "Request Throughput (per sec),123.00\r\n", ] - returned_data = mock_read_write + returned_data = [ + data for filename, data in mock_read_write if filename == expected_filename + ] + if returned_data == []: + raise Exception( + f"Expected file {expected_filename} not found in written data." + ) assert returned_data == expected_content def test_embedding_csv_output( @@ -209,5 +226,5 @@ def test_embedding_csv_output( "Metric,Value\r\n", "Request Throughput (per sec),123.00\r\n", ] - returned_data = mock_read_write + returned_data = [data for _, data in mock_read_write] assert returned_data == expected_content diff --git a/src/c++/perf_analyzer/genai-perf/tests/test_json_exporter.py b/src/c++/perf_analyzer/genai-perf/tests/test_json_exporter.py index f82e59312..8b6e96e33 100644 --- a/src/c++/perf_analyzer/genai-perf/tests/test_json_exporter.py +++ b/src/c++/perf_analyzer/genai-perf/tests/test_json_exporter.py @@ -25,14 +25,42 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import json +import os +from io import StringIO +from typing import Any, List, Tuple import genai_perf.parser as parser +import pytest from genai_perf.export_data.exporter_config import ExporterConfig from genai_perf.export_data.json_exporter import JsonExporter class TestJsonExporter: - def test_generate_json(self, monkeypatch) -> None: + @pytest.fixture + def mock_read_write(self, monkeypatch: pytest.MonkeyPatch) -> List[Tuple[str, str]]: + """ + This function will mock the open function for specific files. + """ + + written_data = [] + + def custom_open(filename, *args, **kwargs): + def write(self: Any, content: str) -> int: + print(f"Writing to {filename}") + written_data.append((str(filename), content)) + return len(content) + + tmp_file = StringIO() + tmp_file.write = write.__get__(tmp_file) + return tmp_file + + monkeypatch.setattr("builtins.open", custom_open) + + return written_data + + def test_generate_json( + self, monkeypatch, mock_read_write: pytest.MonkeyPatch + ) -> None: cli_cmd = [ "genai-perf", "profile", @@ -55,6 +83,64 @@ def test_generate_json(self, monkeypatch) -> None: config.artifact_dir = args.artifact_dir json_exporter = JsonExporter(config) assert json_exporter._stats_and_args == json.loads(self.expected_json_output) + json_exporter.export() + expected_filename = "profile_export_genai_perf.json" + written_data = [ + data + for filename, data in mock_read_write + if os.path.basename(filename) == expected_filename + ] + if written_data == []: + raise Exception( + f"Expected file {expected_filename} not found in written data." 
+            )
+        assert len(written_data) == 1
+        assert json.loads(written_data[0]) == json.loads(self.expected_json_output)
+
+    def test_generate_json_custom_export(
+        self, monkeypatch, mock_read_write: pytest.MonkeyPatch
+    ) -> None:
+        artifacts_dir = "artifacts/gpt2_vllm-triton-vllm-concurrency1"
+        custom_filename = "custom_export.json"
+        expected_filename = f"{artifacts_dir}/custom_export_genai_perf.json"
+        expected_profile_filename = f"{artifacts_dir}/custom_export.json"
+        cli_cmd = [
+            "genai-perf",
+            "profile",
+            "-m",
+            "gpt2_vllm",
+            "--backend",
+            "vllm",
+            "--streaming",
+            "--extra-inputs",
+            "max_tokens:256",
+            "--extra-inputs",
+            "ignore_eos:true",
+            "--profile-export-file",
+            custom_filename,
+        ]
+        monkeypatch.setattr("sys.argv", cli_cmd)
+        args, _ = parser.parse_args()
+        config = ExporterConfig()
+        config.stats = self.stats
+        config.args = args
+        config.extra_inputs = parser.get_extra_inputs_as_dict(args)
+        config.artifact_dir = args.artifact_dir
+        json_exporter = JsonExporter(config)
+        json_exporter.export()
+        written_data = [
+            data for filename, data in mock_read_write if filename == expected_filename
+        ]
+        if written_data == []:
+            raise Exception(
+                f"Expected file {expected_filename} not found in written data."
+            )
+        assert len(written_data) == 1
+        expected_json_output = json.loads(self.expected_json_output)
+        expected_json_output["input_config"][
+            "profile_export_file"
+        ] = expected_profile_filename
+        assert json.loads(written_data[0]) == expected_json_output
 
     stats = {
         "request_throughput": {"unit": "requests/sec", "avg": "7"},
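
A note on the naming rule this patch introduces: both exporters derive the artifact prefix from `--profile-export-file`, the CSV exporter via `Path.stem` and the JSON exporter via `os.path.splitext(os.path.basename(...))[0]`. The two idioms agree for ordinary paths. The sketch below is illustration only, not part of the patch; `genai_perf_artifact_names` is a made-up helper, not a genai-perf API:

```python
import os
from pathlib import Path
from typing import Tuple


def genai_perf_artifact_names(profile_export_file: str) -> Tuple[str, str]:
    """Derive the genai-perf JSON/CSV artifact names from the export path."""
    # Path.stem drops the directory part and the final extension, which is
    # exactly what os.path.splitext(os.path.basename(...))[0] computes in the
    # JSON exporter, so both exporters agree on the prefix.
    stem = Path(profile_export_file).stem
    assert stem == os.path.splitext(os.path.basename(profile_export_file))[0]
    return f"{stem}_genai_perf.json", f"{stem}_genai_perf.csv"


print(genai_perf_artifact_names("artifacts/custom_export.json"))
# ('custom_export_genai_perf.json', 'custom_export_genai_perf.csv')
```

Both artifacts are then written into the exporter's output directory, which is why the tests compare against paths like `artifacts/gpt2_vllm-triton-vllm-concurrency1/custom_export_genai_perf.json`.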
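The updated tests all lean on one pattern worth calling out: `builtins.open` is monkeypatched with a factory that returns an in-memory file whose `write` records `(filename, content)` tuples, so assertions can target a specific output file without touching the filesystem. A self-contained sketch of the same technique; `capture_writes` and the example paths are illustrative names, not the fixture from this patch:

```python
from io import StringIO
from typing import Any, List, Tuple

import pytest


@pytest.fixture
def capture_writes(monkeypatch: pytest.MonkeyPatch) -> List[Tuple[str, str]]:
    """Capture every (filename, content) pair written through builtins.open."""
    written: List[Tuple[str, str]] = []

    def fake_open(filename: Any, *args: Any, **kwargs: Any) -> StringIO:
        buffer = StringIO()

        def recording_write(content: str) -> int:
            written.append((str(filename), content))
            return len(content)

        # Shadow the instance's write method with the recording version.
        buffer.write = recording_write
        return buffer

    monkeypatch.setattr("builtins.open", fake_open)
    return written


def test_writes_are_captured(capture_writes: List[Tuple[str, str]]) -> None:
    with open("artifacts/example_genai_perf.csv", mode="w", newline="") as f:
        f.write("Metric,Value\r\n")
    assert capture_writes == [("artifacts/example_genai_perf.csv", "Metric,Value\r\n")]
```

Because the fake `open` never falls back to the real one (the patch deletes the old `original_open` branch), every file the code under test opens is captured, which is what makes the filename-based filtering in the new assertions meaningful.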