Fixes found during borecleaning
nv-braf committed Nov 13, 2024
1 parent 3edbf03 commit aea6839
Showing 12 changed files with 103 additions and 64 deletions.
genai-perf/genai_perf/checkpoint/checkpoint.py (11 changes: 4 additions & 7 deletions)

@@ -38,7 +38,7 @@ class Checkpoint:
     config: ConfigCommand
 
     # Every top-level class that needs to store state is passed in
-    results: Results
+    results: Results = Results()
 
     def __post_init__(self):
         self._create_class_from_checkpoint()
@@ -61,11 +61,9 @@ def _create_class_from_checkpoint(self) -> None:
         try:
            with open(checkpoint_file_path, "r") as checkpoint_file:
                checkpoint_json = json.load(checkpoint_file)
-               self._state: CheckpointObject = {
-                   "Results": Results.create_class_from_checkpoint(
-                       checkpoint_json["Results"]
-                   )
-               }
+               self.results = Results.create_class_from_checkpoint(
+                   checkpoint_json["Results"]
+               )
 
         except EOFError:
             raise (
@@ -76,7 +74,6 @@ def _create_class_from_checkpoint(self) -> None:
             )
         else:
             self.checkpoint_exists = False
-            self._state = {}
 
     def _create_checkpoint_file_path(self) -> str:
         checkpoint_file_path = os.path.join(
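Editorial summary of this file's change, with a usage sketch (the construction below assumes the dataclass fields shown above; it is not code from the commit): Checkpoint now exposes a public results field with a default value, replacing the private _state dict.

    from genai_perf.checkpoint.checkpoint import Checkpoint
    from genai_perf.config.input.config_command import ConfigCommand

    config = ConfigCommand(model_names=["test_model"])
    checkpoint = Checkpoint(config=config)  # __post_init__ loads any existing checkpoint

    # Before this commit: checkpoint._state["Results"]
    # After this commit:  a public field, defaulting to an empty Results()
    results = checkpoint.results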
genai-perf/genai_perf/config/generate/perf_analyzer_config.py (14 changes: 11 additions & 3 deletions)

@@ -66,21 +66,29 @@ def get_parameters(self) -> Dict[str, Any]:
     ###########################################################################
     # CLI String Creation Methods
     ###########################################################################
-    def create_cli_string(self) -> str:
+    def create_command(self) -> List[str]:
         """
-        Returns the PA command as a string
+        Returns the PA command as a list of strings
         """
         cli_args = self._create_required_args()
         cli_args += self._create_parameter_args()
 
+        return cli_args
+
+    def create_cli_string(self) -> str:
+        """
+        Returns the PA command as a string
+        """
+        cli_args = self.create_command()
+
         cli_string = " ".join(cli_args)
         return cli_string
 
     def _create_required_args(self) -> List[str]:
         # These come from the config and are always used
         required_args = [
             self._config.path,
-            "--model-name",
+            "-m",
             self._model_name,
             "--stability-percentage",
             str(self._config.stability_threshold),
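A brief editorial note on the split (the subprocess call below is hypothetical usage, not something this commit adds): returning the command as a list of arguments lets a caller execute it without shell quoting, while the joined string stays useful for logging.

    import subprocess

    # `pa_config` is a stand-in for a constructed PerfAnalyzerConfig.
    command = pa_config.create_command()   # e.g. ["perf_analyzer", "-m", "test_model", ...]
    subprocess.run(command, check=True)    # list form: no shell, no quoting pitfalls

    print(pa_config.create_cli_string())   # joined form, convenient for logs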
genai-perf/genai_perf/config/generate/search_parameters.py (24 changes: 15 additions & 9 deletions)

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from math import log2
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union

[Check notice, Code scanning / CodeQL: Unused import. Import of 'Union' is not used.]

 from genai_perf.config.generate.objective_parameter import ObjectiveCategory
 from genai_perf.config.generate.search_parameter import (
@@ -22,7 +22,13 @@
     SearchParameter,
     SearchUsage,
 )
-from genai_perf.config.input.config_command import ConfigOptimize, Range, Subcommand
+from genai_perf.config.input.config_command import (
+    ConfigAnalyze,
+    ConfigCommand,
+    ConfigOptimize,
+    Range,
+    Subcommand,
+)

[Check notice, Code scanning / CodeQL: Unused import. Import of 'ConfigAnalyze' is not used. Import of 'ConfigOptimize' is not used.]

 from genai_perf.exceptions import GenAIPerfException
 
@@ -58,17 +64,17 @@ class SearchParameters:
 
     def __init__(
         self,
-        config: ConfigOptimize,
+        config: ConfigCommand,
+        subcommand: Subcommand,
         is_bls_model: bool = False,
         is_ensemble_model: bool = False,
         is_composing_model: bool = False,
     ):
-        self._config = config
-        self._subcommand = (
-            Subcommand.OPTIMIZE
-            if isinstance(config, ConfigOptimize)
-            else Subcommand.ANALYZE
-        )
+        self._subcommand = subcommand
+        if subcommand == Subcommand.OPTIMIZE:
+            self._config = config.optimize
+        elif subcommand == Subcommand.ANALYZE:
+            self._config = config.analyze  # type: ignore
 
         # TODO: OPTIMIZE
         # self._supports_model_batch_size = model.supports_batching()
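Editorial sketch of the new constructor contract (it mirrors the updated tests later in this commit): callers pass the full ConfigCommand plus an explicit Subcommand, and SearchParameters selects config.optimize or config.analyze itself rather than inferring the mode with isinstance.

    from genai_perf.config.generate.search_parameters import SearchParameters
    from genai_perf.config.input.config_command import ConfigCommand, Subcommand

    config = ConfigCommand(model_names=["test_model"])

    # The subcommand is now named explicitly at the call site.
    optimize_params = SearchParameters(config=config, subcommand=Subcommand.OPTIMIZE)
    analyze_params = SearchParameters(config=config, subcommand=Subcommand.ANALYZE)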
genai-perf/genai_perf/config/input/config_command.py (2 changes: 1 addition & 1 deletion)

@@ -81,7 +81,7 @@ class RunConfigDefaults:
     OUTPUT_LOGGING = False
     OUTPUT_PATH = "./logs"
     MAX_AUTO_ADJUSTS = 10
-    STABILITY_THRESHOLD = 99.9
+    STABILITY_THRESHOLD = 999
 
     # GAP Input Defaults
     DATASET = "openorca"
genai-perf/genai_perf/measurements/run_config_measurement.py (6 changes: 5 additions & 1 deletion)

@@ -267,6 +267,7 @@ def create_checkpoint_object(self) -> CheckpointObject:
 
         # Values based solely on user/config settings (that can vary from run to run)
         # are not stored in the checkpoint
+        del rcm_dict["_gpu_metric_objectives"]
         del rcm_dict["_model_weights"]
         del rcm_dict["_constraints"]
 
@@ -432,7 +433,10 @@ def _calculate_weighted_rcm_score(
                 self._calculate_weighted_model_rcm_score(other, gpu_id)
             )
 
-        return mean(weighted_rcm_scores)
+        if not weighted_rcm_scores:
+            return RunConfigMeasurementDefaults.EQUALIVILENT
+        else:
+            return mean(weighted_rcm_scores)
 
     def _calculate_weighted_model_rcm_score(
         self, other: "RunConfigMeasurement", gpu_id: GpuId
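An editorial aside on the guard added to _calculate_weighted_rcm_score (assuming mean here is statistics.mean, which the hunk does not show): Python's mean rejects an empty sequence, so an empty score list must short-circuit to the EQUALIVILENT default. A minimal reproduction of the failure the guard avoids:

    from statistics import StatisticsError, mean

    # mean([]) raises StatisticsError("mean requires at least one data point"),
    # which is the failure mode the new empty-list guard protects against.
    try:
        mean([])
    except StatisticsError as err:
        print(err)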
genai-perf/genai_perf/metrics/statistics.py (7 changes: 4 additions & 3 deletions)

@@ -36,6 +36,7 @@
 from genai_perf.metrics.metrics import Metrics
 from genai_perf.metrics.telemetry_metrics import TelemetryMetrics
 from genai_perf.record.record import Record, RecordType
+from genai_perf.types import PerfRecords
 
 
 class Statistics:
@@ -195,11 +196,11 @@ def export_parquet(self, artifact_dir: Path, filename: str) -> None:
         filepath = artifact_dir / f"{filename}.gzip"
         df.to_parquet(filepath, compression="gzip")
 
-    def create_records(self) -> List[Record]:
+    def create_records(self) -> PerfRecords:
         """
         Populates and returns a list of Records
         """
-        statistic_records = []
+        statistic_records = {}
         for metric_base_name, metric_info in self.stats_dict.items():
             for metric_post_name, metric_value in metric_info.items():
                 if metric_post_name == "unit":
@@ -216,6 +217,6 @@ def create_records(self) -> PerfRecords:
                     f"{metric_name} is not a valid Record tag."
                 )
 
-            statistic_records.append(new_record)
+            statistic_records[metric_name] = new_record
 
         return statistic_records
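An editorial note on the shape change: create_records now returns PerfRecords, a mapping keyed by record tag, so callers look records up by tag instead of by position. A minimal sketch of the new access pattern, assuming `statistics` is a populated Statistics instance and `RequestThroughputAvg` is in scope (both stand-ins, mirroring the test change below):

    # `statistics` and `RequestThroughputAvg` come from the surrounding
    # test code; they are not defined by this commit.
    records = statistics.create_records()

    # Before: positional access, records[0].tag == RequestThroughputAvg.tag
    # After:  keyed access by record tag
    throughput_record = records[RequestThroughputAvg.tag]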
genai-perf/tests/test_checkpoint.py (12 changes: 5 additions & 7 deletions)

@@ -19,7 +19,7 @@
 from genai_perf.checkpoint.checkpoint import Checkpoint
 from genai_perf.config.generate.search_parameters import SearchParameters
 from genai_perf.config.generate.sweep_objective_generator import SweepObjectiveGenerator
-from genai_perf.config.input.config_command import ConfigCommand
+from genai_perf.config.input.config_command import ConfigCommand, Subcommand
 from genai_perf.config.run.results import Results
 from tests.test_utils import create_run_config
 
@@ -31,7 +31,9 @@ class TestCheckpoint(unittest.TestCase):
     def setUp(self):
         self._config = ConfigCommand(model_names=["test_model"])
         self._model_search_parameters = {
-            "test_model": SearchParameters(self._config.analyze)
+            "test_model": SearchParameters(
+                config=self._config, subcommand=Subcommand.ANALYZE
+            )
         }
 
         self._sweep_obj_gen = SweepObjectiveGenerator(
@@ -72,15 +74,11 @@ def test_checkpoint_methods(self):
         Checks to ensure checkpoint methods work as intended
         """
 
-        # First ensure that there is no state when a checkpoint doesn't exist
-        self.assertEqual({}, self._checkpoint._state)
-
         # Then write and read back the Results
         self._checkpoint.create_checkpoint_object()
         self._checkpoint._create_class_from_checkpoint()
         os.remove(self._checkpoint._create_checkpoint_file_path())
 
-        self.assertEqual(self._results, self._checkpoint._state["Results"])
+        self.assertEqual(self._results, self._checkpoint.results)
 
 
 if __name__ == "__main__":
genai-perf/tests/test_llm_profile_data_parser.py (2 changes: 1 addition & 1 deletion)

@@ -220,7 +220,7 @@ def test_triton_llm_profile_data(
         # Check that Records can be created
         records = statistics.create_records()
         assert records is not None
-        assert records[0].tag == RequestThroughputAvg.tag
+        assert records[RequestThroughputAvg.tag] is not None
 
         # check non-existing profile data
         with pytest.raises(KeyError):
genai-perf/tests/test_optuna_objective_generator.py (10 changes: 8 additions & 2 deletions)

@@ -23,7 +23,11 @@
 from genai_perf.config.generate.optuna_objective_generator import (
     OptunaObjectiveGenerator,
 )
-from genai_perf.config.generate.search_parameters import SearchParameters, SearchUsage
+from genai_perf.config.generate.search_parameters import (
+    SearchParameters,
+    SearchUsage,
+    Subcommand,
+)
 from genai_perf.config.input.config_command import ConfigCommand, RunConfigDefaults
 from tests.test_utils import create_perf_metrics, create_run_config_measurement
 
@@ -35,7 +39,9 @@ class TestOptunaObjectiveGenerator(unittest.TestCase):
     def setUp(self):
         self._config = ConfigCommand(model_names=["test_model"])
         self._model_search_parameters = {
-            "test_model": SearchParameters(self._config.optimize)
+            "test_model": SearchParameters(
+                config=self._config, subcommand=Subcommand.OPTIMIZE
+            )
         }
 
         self._perf_metrics = create_perf_metrics(throughput=1000, latency=50)
genai-perf/tests/test_perf_analyzer_config.py (36 changes: 17 additions & 19 deletions)

@@ -79,28 +79,26 @@ def test_config_and_objective_capture(self):
         )
 
     ###########################################################################
-    # Test CLI String Creation
+    # Test Command Creation
     ###########################################################################
-    def test_default_cli_string_creation(self):
+    def test_default_command_creation(self):
         """
         Test that the default CLI string is created correctly
         """
-        expected_cli_string = " ".join(
-            [
-                self._config.perf_analyzer.path,
-                "--model-name",
-                "test_model",
-                "--stability-percentage",
-                str(self._config.perf_analyzer.stability_threshold),
-                "--batch-size",
-                "1",
-                "--concurrency-range",
-                "64",
-            ]
-        )
-        cli_string = self._default_perf_analyzer_config.create_cli_string()
-
-        self.assertEqual(expected_cli_string, cli_string)
+        expected_command = [
+            self._config.perf_analyzer.path,
+            "-m",
+            "test_model",
+            "--stability-percentage",
+            str(self._config.perf_analyzer.stability_threshold),
+            "--batch-size",
+            "1",
+            "--concurrency-range",
+            "64",
+        ]
+        command = self._default_perf_analyzer_config.create_command()
+
+        self.assertEqual(expected_command, command)
 
     ###########################################################################
     # Test Representation
@@ -111,7 +109,7 @@ def test_default_representation(self):
         """
         expected_representation = " ".join(
             [
-                "--model-name",
+                "-m",
                 "test_model",
                 "--stability-percentage",
                 str(self._config.perf_analyzer.stability_threshold),
genai-perf/tests/test_search_parameters.py (29 changes: 21 additions & 8 deletions)

@@ -26,6 +26,7 @@
     ConfigCommand,
     Range,
     RunConfigDefaults,
+    Subcommand,
 )
 from genai_perf.exceptions import GenAIPerfException
 
@@ -34,7 +35,9 @@ class TestSearchParameters(unittest.TestCase):
     def setUp(self):
         self.config = deepcopy(ConfigCommand(model_names=["test_model"]))
 
-        self.search_parameters = SearchParameters(config=self.config.optimize)
+        self.search_parameters = SearchParameters(
+            config=self.config, subcommand=Subcommand.OPTIMIZE
+        )
 
         self.search_parameters._add_search_parameter(
             name="concurrency",
@@ -164,7 +167,9 @@ def test_search_parameter_creation_optimize_default(self):
         """
 
         config = deepcopy(ConfigCommand(model_names=["test_model"]))
-        search_parameters = SearchParameters(self.config.optimize)
+        search_parameters = SearchParameters(
+            config=config, subcommand=Subcommand.OPTIMIZE
+        )
 
         #######################################################################
         # Model Config
@@ -227,7 +232,9 @@ def test_search_parameter_no_concurrency_formula(self):
         config = deepcopy(ConfigCommand(model_names=["test_model"]))
         config.optimize.perf_analyzer.use_concurrency_formula = False
 
-        search_parameters = SearchParameters(config.optimize)
+        search_parameters = SearchParameters(
+            config=config, subcommand=Subcommand.OPTIMIZE
+        )
 
         concurrency = search_parameters.get_parameter("concurrency")
         self.assertEqual(SearchUsage.RUNTIME_PA, concurrency.usage)
@@ -242,7 +249,9 @@ def test_search_parameter_request_rate(self):
         config = deepcopy(ConfigCommand(model_names=["test_model"]))
         config.optimize.perf_analyzer.stimulus_type = "request_rate"
 
-        search_parameters = SearchParameters(config.optimize)
+        search_parameters = SearchParameters(
+            config=config, subcommand=Subcommand.OPTIMIZE
+        )
 
         request_rate = search_parameters.get_parameter("request_rate")
         self.assertEqual(SearchUsage.RUNTIME_PA, request_rate.usage)
@@ -302,7 +311,9 @@ def test_default_analyze_config(self):
         Test that search parameters are created correctly when calling
         default analyze subcommand
         """
-        search_parameters = SearchParameters(config=self.config.analyze)
+        search_parameters = SearchParameters(
+            config=self.config, subcommand=Subcommand.ANALYZE
+        )
 
         concurrency = search_parameters.get_parameter("concurrency")
         self.assertEqual(SearchUsage.RUNTIME_PA, concurrency.usage)
@@ -315,15 +326,17 @@ def test_custom_analyze_config(self):
         Test that search parameters are created correctly when calling
         default analyze subcommand
         """
-        config = deepcopy(self.config.analyze)
-        config.sweep_parameters = {
+        config = deepcopy(self.config)
+        config.analyze.sweep_parameters = {
             "num_prompts": [10, 50, 100],
             "request_rate": Range(
                 min=RunConfigDefaults.MIN_REQUEST_RATE,
                 max=RunConfigDefaults.MAX_REQUEST_RATE,
             ),
         }
-        search_parameters = SearchParameters(config=config)
+        search_parameters = SearchParameters(
+            config=config, subcommand=Subcommand.ANALYZE
+        )
 
         num_prompts = search_parameters.get_parameter("num_prompts")
         self.assertEqual(SearchUsage.RUNTIME_GAP, num_prompts.usage)
(The twelfth changed file was not loaded in this view.)