From aea6839ac1cb8fef30c1a1c245fb7ca1bec1f742 Mon Sep 17 00:00:00 2001
From: braf
Date: Wed, 13 Nov 2024 17:19:26 +0000
Subject: [PATCH] Fixes found during borecleaning

---
 .../genai_perf/checkpoint/checkpoint.py       | 11 +++---
 .../config/generate/perf_analyzer_config.py   | 14 ++++++--
 .../config/generate/search_parameters.py      | 24 ++++++++-----
 .../genai_perf/config/input/config_command.py |  2 +-
 .../measurements/run_config_measurement.py    |  6 +++-
 genai-perf/genai_perf/metrics/statistics.py   |  7 ++--
 genai-perf/tests/test_checkpoint.py           | 12 +++----
 .../tests/test_llm_profile_data_parser.py     |  2 +-
 .../tests/test_optuna_objective_generator.py  | 10 ++++--
 genai-perf/tests/test_perf_analyzer_config.py | 36 +++++++++----------
 genai-perf/tests/test_search_parameters.py    | 29 ++++++++++-----
 .../tests/test_sweep_objective_generator.py   | 14 ++++++--
 12 files changed, 103 insertions(+), 64 deletions(-)

diff --git a/genai-perf/genai_perf/checkpoint/checkpoint.py b/genai-perf/genai_perf/checkpoint/checkpoint.py
index 162f338c..9568a1c0 100644
--- a/genai-perf/genai_perf/checkpoint/checkpoint.py
+++ b/genai-perf/genai_perf/checkpoint/checkpoint.py
@@ -38,7 +38,7 @@ class Checkpoint:
     config: ConfigCommand
 
     # Every top-level class that needs to store state is passed in
-    results: Results
+    results: Results = Results()
 
     def __post_init__(self):
         self._create_class_from_checkpoint()
@@ -61,11 +61,9 @@ def _create_class_from_checkpoint(self) -> None:
             try:
                 with open(checkpoint_file_path, "r") as checkpoint_file:
                     checkpoint_json = json.load(checkpoint_file)
-                    self._state: CheckpointObject = {
-                        "Results": Results.create_class_from_checkpoint(
-                            checkpoint_json["Results"]
-                        )
-                    }
+                    self.results = Results.create_class_from_checkpoint(
+                        checkpoint_json["Results"]
+                    )
 
             except EOFError:
                 raise (
@@ -76,7 +74,6 @@ def _create_class_from_checkpoint(self) -> None:
                 )
         else:
             self.checkpoint_exists = False
-            self._state = {}
 
     def _create_checkpoint_file_path(self) -> str:
         checkpoint_file_path = os.path.join(
diff --git a/genai-perf/genai_perf/config/generate/perf_analyzer_config.py b/genai-perf/genai_perf/config/generate/perf_analyzer_config.py
index 3c2f0da8..30d281cb 100644
--- a/genai-perf/genai_perf/config/generate/perf_analyzer_config.py
+++ b/genai-perf/genai_perf/config/generate/perf_analyzer_config.py
@@ -66,13 +66,21 @@ def get_parameters(self) -> Dict[str, Any]:
     ###########################################################################
     # CLI String Creation Methods
     ###########################################################################
-    def create_cli_string(self) -> str:
+    def create_command(self) -> List[str]:
         """
-        Returns the PA command as a string
+        Returns the PA command as a list of strings
         """
         cli_args = self._create_required_args()
         cli_args += self._create_parameter_args()
 
+        return cli_args
+
+    def create_cli_string(self) -> str:
+        """
+        Returns the PA command as a string
+        """
+        cli_args = self.create_command()
+
         cli_string = " ".join(cli_args)
         return cli_string
 
@@ -80,7 +88,7 @@ def _create_required_args(self) -> List[str]:
         # These come from the config and are always used
         required_args = [
             self._config.path,
-            "--model-name",
+            "-m",
             self._model_name,
             "--stability-percentage",
             str(self._config.stability_threshold),
diff --git a/genai-perf/genai_perf/config/generate/search_parameters.py b/genai-perf/genai_perf/config/generate/search_parameters.py
index f1846834..53cc86bd 100644
--- a/genai-perf/genai_perf/config/generate/search_parameters.py
+++ b/genai-perf/genai_perf/config/generate/search_parameters.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from math import log2
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union
 
 from genai_perf.config.generate.objective_parameter import ObjectiveCategory
 from genai_perf.config.generate.search_parameter import (
@@ -22,7 +22,13 @@
     SearchParameter,
     SearchUsage,
 )
-from genai_perf.config.input.config_command import ConfigOptimize, Range, Subcommand
+from genai_perf.config.input.config_command import (
+    ConfigAnalyze,
+    ConfigCommand,
+    ConfigOptimize,
+    Range,
+    Subcommand,
+)
 from genai_perf.exceptions import GenAIPerfException
 
 
@@ -58,17 +64,17 @@ class SearchParameters:
 
     def __init__(
        self,
-        config: ConfigOptimize,
+        config: ConfigCommand,
+        subcommand: Subcommand,
         is_bls_model: bool = False,
         is_ensemble_model: bool = False,
         is_composing_model: bool = False,
     ):
-        self._config = config
-        self._subcommand = (
-            Subcommand.OPTIMIZE
-            if isinstance(config, ConfigOptimize)
-            else Subcommand.ANALYZE
-        )
+        self._subcommand = subcommand
+        if subcommand == Subcommand.OPTIMIZE:
+            self._config = config.optimize
+        elif subcommand == Subcommand.ANALYZE:
+            self._config = config.analyze  # type: ignore
 
         # TODO: OPTIMIZE
         # self._supports_model_batch_size = model.supports_batching()
diff --git a/genai-perf/genai_perf/config/input/config_command.py b/genai-perf/genai_perf/config/input/config_command.py
index 7a17d66c..bb1693a5 100644
--- a/genai-perf/genai_perf/config/input/config_command.py
+++ b/genai-perf/genai_perf/config/input/config_command.py
@@ -81,7 +81,7 @@ class RunConfigDefaults:
     OUTPUT_LOGGING = False
     OUTPUT_PATH = "./logs"
     MAX_AUTO_ADJUSTS = 10
-    STABILITY_THRESHOLD = 99.9
+    STABILITY_THRESHOLD = 999
 
     # GAP Input Defaults
     DATASET = "openorca"
diff --git a/genai-perf/genai_perf/measurements/run_config_measurement.py b/genai-perf/genai_perf/measurements/run_config_measurement.py
index 242b386c..661172d5 100644
--- a/genai-perf/genai_perf/measurements/run_config_measurement.py
+++ b/genai-perf/genai_perf/measurements/run_config_measurement.py
@@ -267,6 +267,7 @@ def create_checkpoint_object(self) -> CheckpointObject:
 
         # Values based solely on user/config settings (that can vary from run to run)
         # are not stored in the checkpoint
+        del rcm_dict["_gpu_metric_objectives"]
         del rcm_dict["_model_weights"]
         del rcm_dict["_constraints"]
 
@@ -432,7 +433,10 @@ def _calculate_weighted_rcm_score(
                 self._calculate_weighted_model_rcm_score(other, gpu_id)
             )
 
-        return mean(weighted_rcm_scores)
+        if not weighted_rcm_scores:
+            return RunConfigMeasurementDefaults.EQUALIVILENT
+        else:
+            return mean(weighted_rcm_scores)
 
     def _calculate_weighted_model_rcm_score(
         self, other: "RunConfigMeasurement", gpu_id: GpuId
diff --git a/genai-perf/genai_perf/metrics/statistics.py b/genai-perf/genai_perf/metrics/statistics.py
index c2e4ffb6..443aa407 100755
--- a/genai-perf/genai_perf/metrics/statistics.py
+++ b/genai-perf/genai_perf/metrics/statistics.py
@@ -36,6 +36,7 @@
 from genai_perf.metrics.metrics import Metrics
 from genai_perf.metrics.telemetry_metrics import TelemetryMetrics
 from genai_perf.record.record import Record, RecordType
+from genai_perf.types import PerfRecords
 
 
 class Statistics:
@@ -195,11 +196,11 @@ def export_parquet(self, artifact_dir: Path, filename: str) -> None:
         filepath = artifact_dir / f"{filename}.gzip"
         df.to_parquet(filepath, compression="gzip")
 
-    def create_records(self) -> List[Record]:
+    def create_records(self) -> PerfRecords:
         """
         Populates and returns a list of Records
         """
-        statistic_records = []
+        statistic_records = {}
         for metric_base_name, metric_info in self.stats_dict.items():
             for metric_post_name, metric_value in metric_info.items():
                 if metric_post_name == "unit":
@@ -216,6 +217,6 @@ def create_records(self) -> List[Record]:
                         f"{metric_name} is not a valid Record tag."
                     )
 
-            statistic_records.append(new_record)
+            statistic_records[metric_name] = new_record
 
         return statistic_records
diff --git a/genai-perf/tests/test_checkpoint.py b/genai-perf/tests/test_checkpoint.py
index ecd6b9d8..c9db4300 100644
--- a/genai-perf/tests/test_checkpoint.py
+++ b/genai-perf/tests/test_checkpoint.py
@@ -19,7 +19,7 @@
 from genai_perf.checkpoint.checkpoint import Checkpoint
 from genai_perf.config.generate.search_parameters import SearchParameters
 from genai_perf.config.generate.sweep_objective_generator import SweepObjectiveGenerator
-from genai_perf.config.input.config_command import ConfigCommand
+from genai_perf.config.input.config_command import ConfigCommand, Subcommand
 from genai_perf.config.run.results import Results
 from tests.test_utils import create_run_config
 
@@ -31,7 +31,9 @@ class TestCheckpoint(unittest.TestCase):
     def setUp(self):
         self._config = ConfigCommand(model_names=["test_model"])
         self._model_search_parameters = {
-            "test_model": SearchParameters(self._config.analyze)
+            "test_model": SearchParameters(
+                config=self._config, subcommand=Subcommand.ANALYZE
+            )
         }
 
         self._sweep_obj_gen = SweepObjectiveGenerator(
@@ -72,15 +74,11 @@ def test_checkpoint_methods(self):
         Checks to ensure checkpoint methods work as intended
         """
 
-        # First ensure that there is no state when a checkpoint doesn't exist
-        self.assertEqual({}, self._checkpoint._state)
-
-        # Then write and read back the Results
         self._checkpoint.create_checkpoint_object()
         self._checkpoint._create_class_from_checkpoint()
         os.remove(self._checkpoint._create_checkpoint_file_path())
 
-        self.assertEqual(self._results, self._checkpoint._state["Results"])
+        self.assertEqual(self._results, self._checkpoint.results)
 
 
 if __name__ == "__main__":
diff --git a/genai-perf/tests/test_llm_profile_data_parser.py b/genai-perf/tests/test_llm_profile_data_parser.py
index 01b03b0d..776f3d2f 100644
--- a/genai-perf/tests/test_llm_profile_data_parser.py
+++ b/genai-perf/tests/test_llm_profile_data_parser.py
@@ -220,7 +220,7 @@ def test_triton_llm_profile_data(
         # Check that Records can be created
         records = statistics.create_records()
         assert records is not None
-        assert records[0].tag == RequestThroughputAvg.tag
+        assert records[RequestThroughputAvg.tag] is not None
 
         # check non-existing profile data
         with pytest.raises(KeyError):
diff --git a/genai-perf/tests/test_optuna_objective_generator.py b/genai-perf/tests/test_optuna_objective_generator.py
index 4b875097..a6f15639 100644
--- a/genai-perf/tests/test_optuna_objective_generator.py
+++ b/genai-perf/tests/test_optuna_objective_generator.py
@@ -23,7 +23,11 @@
 from genai_perf.config.generate.optuna_objective_generator import (
     OptunaObjectiveGenerator,
 )
-from genai_perf.config.generate.search_parameters import SearchParameters, SearchUsage
+from genai_perf.config.generate.search_parameters import (
+    SearchParameters,
+    SearchUsage,
+    Subcommand,
+)
 from genai_perf.config.input.config_command import ConfigCommand, RunConfigDefaults
 from tests.test_utils import create_perf_metrics, create_run_config_measurement
 
@@ -35,7 +39,9 @@ class TestOptunaObjectiveGenerator(unittest.TestCase):
     def setUp(self):
         self._config = ConfigCommand(model_names=["test_model"])
         self._model_search_parameters = {
-            "test_model": SearchParameters(self._config.optimize)
+            "test_model": SearchParameters(
+                config=self._config, subcommand=Subcommand.OPTIMIZE
+            )
         }
 
         self._perf_metrics = create_perf_metrics(throughput=1000, latency=50)
diff --git a/genai-perf/tests/test_perf_analyzer_config.py b/genai-perf/tests/test_perf_analyzer_config.py
index d1a4936b..d4e337a2 100644
--- a/genai-perf/tests/test_perf_analyzer_config.py
+++ b/genai-perf/tests/test_perf_analyzer_config.py
@@ -79,28 +79,26 @@ def test_config_and_objective_capture(self):
         )
 
     ###########################################################################
-    # Test CLI String Creation
+    # Test Command Creation
     ###########################################################################
-    def test_default_cli_string_creation(self):
+    def test_default_command_creation(self):
         """
         Test that the default CLI string is created correctly
         """
-        expected_cli_string = " ".join(
-            [
-                self._config.perf_analyzer.path,
-                "--model-name",
-                "test_model",
-                "--stability-percentage",
-                str(self._config.perf_analyzer.stability_threshold),
-                "--batch-size",
-                "1",
-                "--concurrency-range",
-                "64",
-            ]
-        )
-        cli_string = self._default_perf_analyzer_config.create_cli_string()
-
-        self.assertEqual(expected_cli_string, cli_string)
+        expected_command = [
+            self._config.perf_analyzer.path,
+            "-m",
+            "test_model",
+            "--stability-percentage",
+            str(self._config.perf_analyzer.stability_threshold),
+            "--batch-size",
+            "1",
+            "--concurrency-range",
+            "64",
+        ]
+        command = self._default_perf_analyzer_config.create_command()
+
+        self.assertEqual(expected_command, command)
     ###########################################################################
     # Test Representation
     ###########################################################################
@@ -111,7 +109,7 @@ def test_default_representation(self):
         """
         expected_representation = " ".join(
             [
-                "--model-name",
+                "-m",
                 "test_model",
                 "--stability-percentage",
                 str(self._config.perf_analyzer.stability_threshold),
diff --git a/genai-perf/tests/test_search_parameters.py b/genai-perf/tests/test_search_parameters.py
index 690018bd..7f480021 100644
--- a/genai-perf/tests/test_search_parameters.py
+++ b/genai-perf/tests/test_search_parameters.py
@@ -26,6 +26,7 @@
     ConfigCommand,
     Range,
     RunConfigDefaults,
+    Subcommand,
 )
 from genai_perf.exceptions import GenAIPerfException
 
@@ -34,7 +35,9 @@ class TestSearchParameters(unittest.TestCase):
 
     def setUp(self):
         self.config = deepcopy(ConfigCommand(model_names=["test_model"]))
-        self.search_parameters = SearchParameters(config=self.config.optimize)
+        self.search_parameters = SearchParameters(
+            config=self.config, subcommand=Subcommand.OPTIMIZE
+        )
 
         self.search_parameters._add_search_parameter(
             name="concurrency",
@@ -164,7 +167,9 @@ def test_search_parameter_creation_optimize_default(self):
         """
 
         config = deepcopy(ConfigCommand(model_names=["test_model"]))
-        search_parameters = SearchParameters(self.config.optimize)
+        search_parameters = SearchParameters(
+            config=config, subcommand=Subcommand.OPTIMIZE
+        )
 
         #######################################################################
         # Model Config
@@ -227,7 +232,9 @@ def test_search_parameter_no_concurrency_formula(self):
         config = deepcopy(ConfigCommand(model_names=["test_model"]))
         config.optimize.perf_analyzer.use_concurrency_formula = False
 
-        search_parameters = SearchParameters(config.optimize)
+        search_parameters = SearchParameters(
+            config=config, subcommand=Subcommand.OPTIMIZE
+        )
 
         concurrency = search_parameters.get_parameter("concurrency")
         self.assertEqual(SearchUsage.RUNTIME_PA, concurrency.usage)
@@ -242,7 +249,9 @@ def test_search_parameter_request_rate(self):
         config = deepcopy(ConfigCommand(model_names=["test_model"]))
         config.optimize.perf_analyzer.stimulus_type = "request_rate"
 
-        search_parameters = SearchParameters(config.optimize)
+        search_parameters = SearchParameters(
+            config=config, subcommand=Subcommand.OPTIMIZE
+        )
 
         request_rate = search_parameters.get_parameter("request_rate")
         self.assertEqual(SearchUsage.RUNTIME_PA, request_rate.usage)
@@ -302,7 +311,9 @@ def test_default_analyze_config(self):
         Test that search parameters are created correctly when calling
         default analyze subcommand
         """
-        search_parameters = SearchParameters(config=self.config.analyze)
+        search_parameters = SearchParameters(
+            config=self.config, subcommand=Subcommand.ANALYZE
+        )
 
         concurrency = search_parameters.get_parameter("concurrency")
         self.assertEqual(SearchUsage.RUNTIME_PA, concurrency.usage)
@@ -315,15 +326,17 @@ def test_custom_analyze_config(self):
         Test that search parameters are created correctly when calling
         default analyze subcommand
         """
-        config = deepcopy(self.config.analyze)
-        config.sweep_parameters = {
+        config = deepcopy(self.config)
+        config.analyze.sweep_parameters = {
             "num_prompts": [10, 50, 100],
             "request_rate": Range(
                 min=RunConfigDefaults.MIN_REQUEST_RATE,
                 max=RunConfigDefaults.MAX_REQUEST_RATE,
             ),
         }
-        search_parameters = SearchParameters(config=config)
+        search_parameters = SearchParameters(
+            config=config, subcommand=Subcommand.ANALYZE
+        )
 
         num_prompts = search_parameters.get_parameter("num_prompts")
         self.assertEqual(SearchUsage.RUNTIME_GAP, num_prompts.usage)
diff --git a/genai-perf/tests/test_sweep_objective_generator.py b/genai-perf/tests/test_sweep_objective_generator.py
index 2d7183a0..106c34ec 100644
--- a/genai-perf/tests/test_sweep_objective_generator.py
+++ b/genai-perf/tests/test_sweep_objective_generator.py
@@ -18,7 +18,11 @@
 
 from genai_perf.config.generate.search_parameters import SearchParameters
 from genai_perf.config.generate.sweep_objective_generator import SweepObjectiveGenerator
-from genai_perf.config.input.config_command import ConfigCommand, RunConfigDefaults
+from genai_perf.config.input.config_command import (
+    ConfigCommand,
+    RunConfigDefaults,
+    Subcommand,
+)
 
 
 class TestSweepObjectiveGenerator(unittest.TestCase):
@@ -28,8 +32,12 @@ class TestSweepObjectiveGenerator(unittest.TestCase):
     def setUp(self):
         self._config = ConfigCommand(model_names=["test_modelA", "test_modelB"])
         self._model_search_parameters = {
-            "test_modelA": SearchParameters(self._config.optimize),
-            "test_modelB": SearchParameters(self._config.optimize),
+            "test_modelA": SearchParameters(
+                config=self._config, subcommand=Subcommand.OPTIMIZE
+            ),
+            "test_modelB": SearchParameters(
+                config=self._config, subcommand=Subcommand.OPTIMIZE
+            ),
         }
 
         self._sweep_obj_gen = SweepObjectiveGenerator(