diff --git a/.github/workflows/python-package-genai.yml b/.github/workflows/python-package-genai.yml
index 88d3750a..6a401cb8 100644
--- a/.github/workflows/python-package-genai.yml
+++ b/.github/workflows/python-package-genai.yml
@@ -39,7 +39,7 @@ jobs:
       fail-fast: false
       matrix:
         os: ["ubuntu-22.04"]
-        python-version: ["3.8", "3.10"]
+        python-version: ["3.10"]
 
     steps:
     - uses: actions/checkout@v3
diff --git a/genai-perf/README.md b/genai-perf/README.md
index f2466df8..02c6803a 100644
--- a/genai-perf/README.md
+++ b/genai-perf/README.md
@@ -89,7 +89,7 @@ genai-perf --help
 Since GenAI-Perf depends on Perf Analyzer,
 you'll need to install the Perf Analyzer binary:
 
-### Install Perf Analyzer (Ubuntu, Python 3.8+)
+### Install Perf Analyzer (Ubuntu, Python 3.10+)
 
 **NOTE**: you must already have CUDA 12 installed
 (checkout the [CUDA installation guide](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html)).
@@ -282,7 +282,7 @@ When the dataset is synthetic, you can specify the following options:
 When the dataset is coming from a file,
 you can specify the following options:
 
-* `--input-file <path>`: The input file or directory containing the prompts or
+* `--input-file <path>`: The input file or directory containing the prompts or
   filepaths to images to use for benchmarking as JSON objects.
 
 For any dataset, you can specify the following options:
@@ -420,7 +420,7 @@ Alternatively, a string representing a json formatted dict can be provided.
 
 ##### `--input-file <path>`
 
-The input file or directory containing the content to use for
+The input file or directory containing the content to use for
 profiling. To use synthetic files for a converter that needs
 multiple files, prefix the path with 'synthetic:', followed by a
 comma-separated list of filenames. The synthetic filenames should not have
diff --git a/genai-perf/genai_perf/inputs/converters/base_converter.py b/genai-perf/genai_perf/inputs/converters/base_converter.py
index 907b0c21..c6b171b0 100644
--- a/genai-perf/genai_perf/inputs/converters/base_converter.py
+++ b/genai-perf/genai_perf/inputs/converters/base_converter.py
@@ -42,12 +42,14 @@ class BaseConverter:
     def check_config(self, config: InputsConfig) -> None:
         """
         Check whether the provided configuration is valid for this converter.
-
+
         Throws a GenAIPerfException if the configuration is invalid.
         """
         pass
 
-    def convert(self, generic_dataset: GenericDataset, config: InputsConfig) -> Dict[Any, Any]:
+    def convert(
+        self, generic_dataset: GenericDataset, config: InputsConfig
+    ) -> Dict[Any, Any]:
         """
         Construct a request body using the endpoint specific request format.
""" diff --git a/genai-perf/genai_perf/inputs/converters/openai_chat_completions_converter.py b/genai-perf/genai_perf/inputs/converters/openai_chat_completions_converter.py index 70188c9e..2a40587a 100644 --- a/genai-perf/genai_perf/inputs/converters/openai_chat_completions_converter.py +++ b/genai-perf/genai_perf/inputs/converters/openai_chat_completions_converter.py @@ -27,27 +27,41 @@ import random from typing import Any, Dict, List, Union +from genai_perf.exceptions import GenAIPerfException from genai_perf.inputs.converters.base_converter import BaseConverter -from genai_perf.inputs.input_constants import DEFAULT_OUTPUT_TOKENS_MEAN, OutputFormat +from genai_perf.inputs.input_constants import ( + DEFAULT_BATCH_SIZE, + DEFAULT_OUTPUT_TOKENS_MEAN, + OutputFormat, +) from genai_perf.inputs.inputs_config import InputsConfig from genai_perf.inputs.retrievers.generic_dataset import DataRow, GenericDataset -from genai_perf.exceptions import GenAIPerfException -from genai_perf.inputs.input_constants import DEFAULT_BATCH_SIZE + class OpenAIChatCompletionsConverter(BaseConverter): def check_config(self, config: InputsConfig) -> None: if config.output_format == OutputFormat.IMAGE_RETRIEVAL: if config.add_stream: - raise GenAIPerfException(f"The --streaming option is not supported for {config.output_format.to_lowercase()}.") - elif config.output_format == OutputFormat.OPENAI_CHAT_COMPLETIONS or config.output_format == OutputFormat.OPENAI_VISION: + raise GenAIPerfException( + f"The --streaming option is not supported for {config.output_format.to_lowercase()}." + ) + elif ( + config.output_format == OutputFormat.OPENAI_CHAT_COMPLETIONS + or config.output_format == OutputFormat.OPENAI_VISION + ): if config.batch_size_text != DEFAULT_BATCH_SIZE: - raise GenAIPerfException(f"The --batch-size-text flag is not supported for {config.output_format.to_lowercase()}.") + raise GenAIPerfException( + f"The --batch-size-text flag is not supported for {config.output_format.to_lowercase()}." + ) if config.batch_size_image != DEFAULT_BATCH_SIZE: - raise GenAIPerfException(f"The --batch-size-image flag is not supported for {config.output_format.to_lowercase()}.") + raise GenAIPerfException( + f"The --batch-size-image flag is not supported for {config.output_format.to_lowercase()}." 
+                )
-
-    def convert(self, generic_dataset: GenericDataset, config: InputsConfig) -> Dict[Any, Any]:
+
+    def convert(
+        self, generic_dataset: GenericDataset, config: InputsConfig
+    ) -> Dict[Any, Any]:
         request_body: Dict[str, Any] = {"data": []}
 
         for file_data in generic_dataset.files_data.values():
@@ -57,7 +71,9 @@ def convert(self, generic_dataset: GenericDataset, config: InputsConfig) -> Dict
 
         return request_body
 
-    def _create_payload(self, index: int, row: DataRow, config: InputsConfig) -> Dict[Any, Any]:
+    def _create_payload(
+        self, index: int, row: DataRow, config: InputsConfig
+    ) -> Dict[Any, Any]:
         model_name = self._select_model_name(config, index)
         content = self._retrieve_content(row, config)
 
@@ -74,18 +90,25 @@ def _create_payload(self, index: int, row: DataRow, config: InputsConfig) -> Dic
         self._add_request_params(payload, config)
         return payload
 
-    def _retrieve_content(self, row: DataRow, config: InputsConfig) -> Union[str, List[Dict[Any, Any]]]:
+    def _retrieve_content(
+        self, row: DataRow, config: InputsConfig
+    ) -> Union[str, List[Dict[Any, Any]]]:
         content: Union[str, List[Dict[Any, Any]]] = ""
         if config.output_format == OutputFormat.OPENAI_CHAT_COMPLETIONS:
             content = row.texts[0]
-        elif config.output_format == OutputFormat.OPENAI_VISION or config.output_format == OutputFormat.IMAGE_RETRIEVAL:
+        elif (
+            config.output_format == OutputFormat.OPENAI_VISION
+            or config.output_format == OutputFormat.IMAGE_RETRIEVAL
+        ):
             content = self._add_multi_modal_content(row)
         else:
-            raise GenAIPerfException(f"Output format {config.output_format} is not supported")
+            raise GenAIPerfException(
+                f"Output format {config.output_format} is not supported"
+            )
 
         return content
-
+
     def _add_multi_modal_content(self, entry: DataRow) -> List[Dict[Any, Any]]:
-        content = []
+        content: List[Dict[Any, Any]] = []
         for text in entry.texts:
             content.append(
                 {
diff --git a/genai-perf/genai_perf/inputs/converters/openai_completions_converter.py b/genai-perf/genai_perf/inputs/converters/openai_completions_converter.py
index 8a126704..c8b2df43 100644
--- a/genai-perf/genai_perf/inputs/converters/openai_completions_converter.py
+++ b/genai-perf/genai_perf/inputs/converters/openai_completions_converter.py
@@ -32,9 +32,12 @@
 from genai_perf.inputs.inputs_config import InputsConfig
 from genai_perf.inputs.retrievers.generic_dataset import GenericDataset
 
+
 class OpenAICompletionsConverter(BaseConverter):
 
-    def convert(self, generic_dataset: GenericDataset, config: InputsConfig) -> Dict[Any, Any]:
+    def convert(
+        self, generic_dataset: GenericDataset, config: InputsConfig
+    ) -> Dict[Any, Any]:
         request_body: Dict[str, Any] = {"data": []}
 
         for file_data in generic_dataset.files_data.values():
diff --git a/genai-perf/genai_perf/inputs/converters/openai_embeddings_converter.py b/genai-perf/genai_perf/inputs/converters/openai_embeddings_converter.py
index f6718bf3..2b0485fb 100644
--- a/genai-perf/genai_perf/inputs/converters/openai_embeddings_converter.py
+++ b/genai-perf/genai_perf/inputs/converters/openai_embeddings_converter.py
@@ -26,18 +26,23 @@
 
 from typing import Any, Dict
 
+from genai_perf.exceptions import GenAIPerfException
 from genai_perf.inputs.converters.base_converter import BaseConverter
 from genai_perf.inputs.inputs_config import InputsConfig
 from genai_perf.inputs.retrievers.generic_dataset import GenericDataset
-from genai_perf.exceptions import GenAIPerfException
+
 
 class OpenAIEmbeddingsConverter(BaseConverter):
     def check_config(self, config: InputsConfig) -> None:
         if config.add_stream:
-            raise GenAIPerfException(f"The --streaming option is not supported for {config.output_format.to_lowercase()}.")
-
-    def convert(self, generic_dataset: GenericDataset, config: InputsConfig) -> Dict[Any, Any]:
+            raise GenAIPerfException(
+                f"The --streaming option is not supported for {config.output_format.to_lowercase()}."
+            )
+
+    def convert(
+        self, generic_dataset: GenericDataset, config: InputsConfig
+    ) -> Dict[Any, Any]:
         request_body: Dict[str, Any] = {"data": []}
 
         for file_data in generic_dataset.files_data.values():
diff --git a/genai-perf/genai_perf/inputs/converters/rankings_converter.py b/genai-perf/genai_perf/inputs/converters/rankings_converter.py
index 76dc0995..7b807a59 100644
--- a/genai-perf/genai_perf/inputs/converters/rankings_converter.py
+++ b/genai-perf/genai_perf/inputs/converters/rankings_converter.py
@@ -24,26 +24,31 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-from typing import Any, Dict
+from typing import Any, Dict, List, Union
 
+from genai_perf.exceptions import GenAIPerfException
 from genai_perf.inputs.converters.base_converter import BaseConverter
 from genai_perf.inputs.inputs_config import InputsConfig
 from genai_perf.inputs.retrievers.generic_dataset import GenericDataset
-from genai_perf.exceptions import GenAIPerfException
+
 
 class RankingsConverter(BaseConverter):
     def check_config(self, config: InputsConfig) -> None:
         if config.add_stream:
-            raise GenAIPerfException(f"The --streaming option is not supported for {config.output_format.to_lowercase()}.")
+            raise GenAIPerfException(
+                f"The --streaming option is not supported for {config.output_format.to_lowercase()}."
+            )
-
-    def convert(self, generic_dataset: GenericDataset, config: InputsConfig) -> Dict[Any, Any]:
+
+    def convert(
+        self, generic_dataset: GenericDataset, config: InputsConfig
+    ) -> Dict[Any, Any]:
         provided_filenames = list(generic_dataset.files_data.keys())
         if "queries" not in provided_filenames or "passages" not in provided_filenames:
             raise ValueError(
                 "Both 'queries.jsonl' and 'passages.jsonl' must be present in the input datasets."
            )
-
+
        queries_data = generic_dataset.files_data["queries"]
        passages_data = generic_dataset.files_data["passages"]
@@ -60,15 +65,15 @@ def convert(self, generic_dataset: GenericDataset, config: InputsConfig) -> Dict
 
             passage_entry = passages_data.rows[query_index]
 
+            passages: Union[List[str], List[Dict[str, str]]]
+            payload: Dict[str, Any]
             if self._is_rankings_tei(config):
                 passages = passage_entry.texts
                 payload = {"query": query, "texts": passages}
             else:
                 passages = [
-                    {"text_input": p}
-                    for p in passage_entry.texts
-                    if p is not None
+                    {"text_input": p} for p in passage_entry.texts if p is not None
                 ]
                 payload = {
                     "query": query,
@@ -76,11 +81,9 @@ def convert(self, generic_dataset: GenericDataset, config: InputsConfig) -> Dict
                     "model": model_name,
                 }
 
-
             self._add_request_params(payload, config)
             request_body["data"].append({"payload": [payload]})
-
         return request_body
 
     def _is_rankings_tei(self, config: InputsConfig) -> bool:
diff --git a/genai-perf/genai_perf/inputs/converters/tensorrtllm_converter.py b/genai-perf/genai_perf/inputs/converters/tensorrtllm_converter.py
index 9e825f61..36549ea0 100644
--- a/genai-perf/genai_perf/inputs/converters/tensorrtllm_converter.py
+++ b/genai-perf/genai_perf/inputs/converters/tensorrtllm_converter.py
@@ -27,25 +27,30 @@
 import random
 from typing import Any, Dict
 
+from genai_perf.exceptions import GenAIPerfException
 from genai_perf.inputs.converters.base_converter import BaseConverter
 from genai_perf.inputs.input_constants import (
+    DEFAULT_BATCH_SIZE,
     DEFAULT_OUTPUT_TOKENS_MEAN,
     DEFAULT_TENSORRTLLM_MAX_TOKENS,
 )
 from genai_perf.inputs.inputs_config import InputsConfig
 from genai_perf.inputs.retrievers.generic_dataset import GenericDataset
-from genai_perf.exceptions import GenAIPerfException
-from genai_perf.inputs.input_constants import DEFAULT_BATCH_SIZE
+
 
 class TensorRTLLMConverter(BaseConverter):
     def check_config(self, config: InputsConfig) -> None:
         if config.batch_size_text != DEFAULT_BATCH_SIZE:
-            raise GenAIPerfException(f"The --batch-size-text flag is not supported for {config.output_format.to_lowercase()}.")
-
-    def convert(self, generic_dataset: GenericDataset, config: InputsConfig) -> Dict[Any, Any]:
+            raise GenAIPerfException(
+                f"The --batch-size-text flag is not supported for {config.output_format.to_lowercase()}."
+            )
+
+    def convert(
+        self, generic_dataset: GenericDataset, config: InputsConfig
+    ) -> Dict[Any, Any]:
         request_body: Dict[str, Any] = {"data": []}
-
+
         for file_data in generic_dataset.files_data.values():
             for index, row in enumerate(file_data.rows):
                 model_name = self._select_model_name(config, index)
diff --git a/genai-perf/genai_perf/inputs/converters/tensorrtllm_engine_converter.py b/genai-perf/genai_perf/inputs/converters/tensorrtllm_engine_converter.py
index eb9f0393..cda6df61 100644
--- a/genai-perf/genai_perf/inputs/converters/tensorrtllm_engine_converter.py
+++ b/genai-perf/genai_perf/inputs/converters/tensorrtllm_engine_converter.py
@@ -27,22 +27,27 @@
 import random
 from typing import Any, Dict
 
+from genai_perf.exceptions import GenAIPerfException
 from genai_perf.inputs.converters.base_converter import BaseConverter
 from genai_perf.inputs.input_constants import (
+    DEFAULT_BATCH_SIZE,
     DEFAULT_OUTPUT_TOKENS_MEAN,
     DEFAULT_TENSORRTLLM_MAX_TOKENS,
 )
 from genai_perf.inputs.inputs_config import InputsConfig
 from genai_perf.inputs.retrievers.generic_dataset import GenericDataset
-from genai_perf.exceptions import GenAIPerfException
-from genai_perf.inputs.input_constants import DEFAULT_BATCH_SIZE
+
 
 class TensorRTLLMEngineConverter(BaseConverter):
     def check_config(self, config: InputsConfig) -> None:
         if config.batch_size_text != DEFAULT_BATCH_SIZE:
-            raise GenAIPerfException(f"The --batch-size-text flag is not supported for {config.output_format.to_lowercase()}.")
-
-    def convert(self, generic_dataset: GenericDataset, config: InputsConfig) -> Dict[Any, Any]:
+            raise GenAIPerfException(
+                f"The --batch-size-text flag is not supported for {config.output_format.to_lowercase()}."
+            )
+
+    def convert(
+        self, generic_dataset: GenericDataset, config: InputsConfig
+    ) -> Dict[Any, Any]:
         request_body: Dict[str, Any] = {"data": []}
 
         for file_data in generic_dataset.files_data.values():
diff --git a/genai-perf/genai_perf/inputs/converters/vllm_converter.py b/genai-perf/genai_perf/inputs/converters/vllm_converter.py
index d97be0c2..306631ae 100644
--- a/genai-perf/genai_perf/inputs/converters/vllm_converter.py
+++ b/genai-perf/genai_perf/inputs/converters/vllm_converter.py
@@ -28,20 +28,27 @@
 import random
 from typing import Any, Dict
 
+from genai_perf.exceptions import GenAIPerfException
 from genai_perf.inputs.converters.base_converter import BaseConverter
-from genai_perf.inputs.input_constants import DEFAULT_OUTPUT_TOKENS_MEAN
+from genai_perf.inputs.input_constants import (
+    DEFAULT_BATCH_SIZE,
+    DEFAULT_OUTPUT_TOKENS_MEAN,
+)
 from genai_perf.inputs.inputs_config import InputsConfig
 from genai_perf.inputs.retrievers.generic_dataset import GenericDataset
-from genai_perf.exceptions import GenAIPerfException
-from genai_perf.inputs.input_constants import DEFAULT_BATCH_SIZE
+
 
 class VLLMConverter(BaseConverter):
     def check_config(self, config: InputsConfig) -> None:
         if config.batch_size_text != DEFAULT_BATCH_SIZE:
-            raise GenAIPerfException(f"The --batch-size-text flag is not supported for {config.output_format.to_lowercase()}.")
-
-    def convert(self, generic_dataset: GenericDataset, config: InputsConfig) -> Dict[Any, Any]:
+            raise GenAIPerfException(
+                f"The --batch-size-text flag is not supported for {config.output_format.to_lowercase()}."
+            )
+
+    def convert(
+        self, generic_dataset: GenericDataset, config: InputsConfig
+    ) -> Dict[Any, Any]:
         request_body: Dict[str, Any] = {"data": []}
 
         for file_data in generic_dataset.files_data.values():
diff --git a/genai-perf/genai_perf/inputs/retrievers/file_input_retriever.py b/genai-perf/genai_perf/inputs/retrievers/file_input_retriever.py
index 7e7657bf..51e00d81 100644
--- a/genai-perf/genai_perf/inputs/retrievers/file_input_retriever.py
+++ b/genai-perf/genai_perf/inputs/retrievers/file_input_retriever.py
@@ -26,25 +26,29 @@
 
 import random
 from pathlib import Path
-from typing import cast, Dict, List, Tuple
+from typing import Dict, List, Tuple, cast
 
 from genai_perf import utils
 from genai_perf.exceptions import GenAIPerfException
 from genai_perf.inputs.input_constants import DEFAULT_BATCH_SIZE
 from genai_perf.inputs.inputs_config import InputsConfig
-from genai_perf.inputs.retrievers.generic_dataset import DataRow, FileData, GenericDataset
-from genai_perf.utils import load_json_str
-from genai_perf.inputs.retrievers.synthetic_image_generator import ImageFormat
 from genai_perf.inputs.retrievers.base_input_retriever import BaseInputRetriever
+from genai_perf.inputs.retrievers.generic_dataset import (
+    DataRow,
+    FileData,
+    GenericDataset,
+)
+from genai_perf.inputs.retrievers.synthetic_image_generator import ImageFormat
+from genai_perf.utils import load_json_str
 from PIL import Image
 
+
 class FileInputRetriever(BaseInputRetriever):
     """
     A input retriever class that handles input data provided by the user
     through file and directories.
     """
 
-
     def retrieve_data(self) -> GenericDataset:
         """
         Retrieves the dataset from a file or directory.
@@ -62,9 +66,9 @@ def retrieve_data(self) -> GenericDataset:
         else:
             file_data = self._get_input_dataset_from_file(self.config.input_filename)
             files_data = {file_data.filename: file_data}
-
+
         return GenericDataset(files_data)
-
+
     def _get_input_datasets_from_dir(self) -> Dict[str, FileData]:
         """
         Retrieves the dataset from a directory containing multiple JSONL files.
@@ -73,7 +77,7 @@ def _get_input_datasets_from_dir(self) -> Dict[str, FileData]:
         ----------
         directory : Path
             The directory path to process.
-
+
         Returns
         -------
         Dict[str, FileData]
@@ -83,14 +87,16 @@ def _get_input_datasets_from_dir(self) -> Dict[str, FileData]:
         self.config.input_filename = cast(Path, self.config.input_filename)
         jsonl_files = list(self.config.input_filename.glob("*.jsonl"))
         if not jsonl_files:
-            raise ValueError(f"No JSONL files found in directory '{self.config.input_filename}'.")
-
+            raise ValueError(
+                f"No JSONL files found in directory '{self.config.input_filename}'."
+            )
+
         files_data: Dict[str, FileData] = {}
         for file in jsonl_files:
             file_data = self._get_input_dataset_from_file(file)
             files_data[file.stem] = file_data
         return files_data
-
+
     def _get_input_dataset_from_file(self, filename: Path) -> FileData:
         """
         Retrieves the dataset from a specific JSONL file.
@@ -99,7 +105,7 @@ def _get_input_dataset_from_file(self, filename: Path) -> FileData:
         ----------
         filename : Path
             The path of the file to process.
-
+
         Returns
         -------
         Dict
@@ -125,11 +131,11 @@ def _verify_file(self, filename: Path) -> None:
             If the file does not exist.
         """
         if not filename.exists():
-            raise FileNotFoundError(
-                f"The file '{filename}' does not exist."
-            )
+            raise FileNotFoundError(f"The file '{filename}' does not exist.")
 
-    def _get_content_from_input_file(self, filename: Path) -> Tuple[List[str], List[str]]:
+    def _get_content_from_input_file(
+        self, filename: Path
+    ) -> Tuple[List[str], List[str]]:
         """
         Reads the content from a JSONL file and returns lists of each content type.
 
@@ -137,7 +143,7 @@ def _get_content_from_input_file(self, filename: Path) -> Tuple[List[str], List[
         ----------
         filename : Path
             The file path from which to read the content.
-
+
         Returns
         -------
         Tuple[List[str], List[str]]
@@ -198,7 +204,9 @@ def _encode_image(self, filename: str) -> str:
         payload = f"data:image/{img.format.lower()};base64,{img_base64}"
         return payload
 
-    def _convert_content_to_data_file(self, prompts: List[str], images: List[str], filename: Path) -> FileData:
+    def _convert_content_to_data_file(
+        self, prompts: List[str], images: List[str], filename: Path
+    ) -> FileData:
         """
         Converts the content to a DataFile.
 
@@ -210,14 +218,14 @@ def _convert_content_to_data_file(self, prompts: List[str], images: List[str], f
             The list of images to convert.
         filename : Path
             The filename to use for the DataFile.
-
+
         Returns
         -------
         FileData
             The DataFile containing the converted data.
         """
         data_rows: List[DataRow] = []
-
+
         if prompts and images:
             if self.config.batch_size_text > len(prompts):
                 raise ValueError(
@@ -227,11 +235,16 @@ def _convert_content_to_data_file(self, prompts: List[str], images: List[str], f
                 raise ValueError(
                     "Batch size for images cannot be larger than the number of available images"
                 )
-            if self.config.batch_size_image > DEFAULT_BATCH_SIZE or self.config.batch_size_text > DEFAULT_BATCH_SIZE:
+            if (
+                self.config.batch_size_image > DEFAULT_BATCH_SIZE
+                or self.config.batch_size_text > DEFAULT_BATCH_SIZE
+            ):
                 for _ in range(self.config.num_prompts):
                     sampled_texts = random.sample(prompts, self.config.batch_size_text)
                     sampled_images = random.sample(images, self.config.batch_size_image)
-                    data_rows.append(DataRow(texts=sampled_texts, images=sampled_images))
+                    data_rows.append(
+                        DataRow(texts=sampled_texts, images=sampled_images)
+                    )
             else:
                 for prompt, image in zip(prompts, images):
                     data_rows.append(DataRow(texts=[prompt], images=[image]))
@@ -252,7 +265,7 @@ def _convert_content_to_data_file(self, prompts: List[str], images: List[str], f
                 raise ValueError(
                     "Batch size for images cannot be larger than the number of available images"
                 )
-
+
             if self.config.batch_size_image > DEFAULT_BATCH_SIZE:
                 for _ in range(self.config.num_prompts):
                     sampled_images = random.sample(images, self.config.batch_size_image)
diff --git a/genai-perf/genai_perf/inputs/retrievers/generic_dataset.py b/genai-perf/genai_perf/inputs/retrievers/generic_dataset.py
index 85dc515e..126c2dd2 100644
--- a/genai-perf/genai_perf/inputs/retrievers/generic_dataset.py
+++ b/genai-perf/genai_perf/inputs/retrievers/generic_dataset.py
@@ -24,8 +24,8 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-from typing import Dict, List, TypeAlias
 from dataclasses import dataclass, field
+from typing import Dict, List, TypeAlias
 
 Filename: TypeAlias = str
 TypeOfData: TypeAlias = str
@@ -33,6 +33,7 @@ DataRowDict: TypeAlias = Dict[TypeOfData, ListOfData]
 GenericDatasetDict: TypeAlias = Dict[Filename, List[DataRowDict]]
 
+
 @dataclass
 class DataRow:
     texts: List[str] = field(default_factory=list)
@@ -44,6 +45,7 @@ def to_dict(self) -> DataRowDict:
         """
         return {"texts": self.texts, "images": self.images}
 
+
 @dataclass
 class FileData:
     filename: str
@@ -62,6 +64,7 @@ def to_dict(self) -> Dict[Filename, List[DataRowDict]]:
         """
         return {self.filename: [row.to_dict() for row in self.rows]}
 
+
 @dataclass
 class GenericDataset:
     files_data: Dict[str, FileData]
diff --git a/genai-perf/genai_perf/inputs/retrievers/input_retriever_factory.py b/genai-perf/genai_perf/inputs/retrievers/input_retriever_factory.py
index 2ab289cb..e7cca0de 100644
--- a/genai-perf/genai_perf/inputs/retrievers/input_retriever_factory.py
+++ b/genai-perf/genai_perf/inputs/retrievers/input_retriever_factory.py
@@ -27,15 +27,17 @@
 
 from genai_perf.exceptions import GenAIPerfException
 from genai_perf.inputs.input_constants import PromptSource
 from genai_perf.inputs.inputs_config import InputsConfig
+from genai_perf.inputs.retrievers.base_input_retriever import BaseInputRetriever
 from genai_perf.inputs.retrievers.file_input_retriever import FileInputRetriever
 from genai_perf.inputs.retrievers.synthetic_data_retriever import SyntheticDataRetriever
-from genai_perf.inputs.retrievers.base_input_retriever import BaseInputRetriever
+
 
 class InputRetrieverFactory:
     """
     Factory class to create the input retriever to get the input data
     based on the input source.
     """
+
     @staticmethod
     def create(config: InputsConfig) -> BaseInputRetriever:
         retrievers = {
diff --git a/genai-perf/genai_perf/inputs/retrievers/synthetic_data_retriever.py b/genai-perf/genai_perf/inputs/retrievers/synthetic_data_retriever.py
index 4eb1c2bb..ed5035f8 100644
--- a/genai-perf/genai_perf/inputs/retrievers/synthetic_data_retriever.py
+++ b/genai-perf/genai_perf/inputs/retrievers/synthetic_data_retriever.py
@@ -27,7 +27,9 @@
 
 from typing import List
 
+from genai_perf.inputs.input_constants import DEFAULT_SYNTHETIC_FILENAME
 from genai_perf.inputs.inputs_config import InputsConfig
+from genai_perf.inputs.retrievers.base_input_retriever import BaseInputRetriever
 from genai_perf.inputs.retrievers.generic_dataset import (
     DataRow,
     FileData,
@@ -39,8 +41,7 @@
 from genai_perf.inputs.retrievers.synthetic_prompt_generator import (
     SyntheticPromptGenerator,
 )
-from genai_perf.inputs.retrievers.base_input_retriever import BaseInputRetriever
-from genai_perf.inputs.input_constants import DEFAULT_SYNTHETIC_FILENAME
+
 
 class SyntheticDataRetriever(BaseInputRetriever):
     """
@@ -66,18 +67,18 @@ def retrieve_data(self) -> GenericDataset:
 
                 for _ in range(self.config.batch_size_image):
                     image = SyntheticImageGenerator.create_synthetic_image(
-                            image_width_mean=self.config.image_width_mean,
-                            image_width_stddev=self.config.image_width_stddev,
-                            image_height_mean=self.config.image_height_mean,
-                            image_height_stddev=self.config.image_height_stddev,
-                            image_format=self.config.image_format,
-                        )
+                        image_width_mean=self.config.image_width_mean,
+                        image_width_stddev=self.config.image_width_stddev,
+                        image_height_mean=self.config.image_height_mean,
+                        image_height_stddev=self.config.image_height_stddev,
+                        image_format=self.config.image_format,
+                    )
                     row.images.append(image)
                 data_rows.append(row)
 
             file_data = FileData(file, data_rows)
-
+
             synthetic_dataset.files_data[file] = file_data
 
         return synthetic_dataset
diff --git a/genai-perf/genai_perf/parser.py b/genai-perf/genai_perf/parser.py
index a18173ab..77166fa8 100644
--- a/genai-perf/genai_perf/parser.py
+++ b/genai-perf/genai_perf/parser.py
@@ -344,13 +344,15 @@ def parse_goodput(values):
 
 def _infer_prompt_source(args: argparse.Namespace) -> argparse.Namespace:
     args.synthetic_input_files = None
-
+
     if args.input_file:
         if str(args.input_file).startswith("synthetic:"):
             args.prompt_source = ic.PromptSource.SYNTHETIC
             synthetic_input_files_str = str(args.input_file).split(":", 1)[1]
             args.synthetic_input_files = synthetic_input_files_str.split(",")
-            logger.debug(f"Input source is synthetic data: {args.synthetic_input_files}")
+            logger.debug(
+                f"Input source is synthetic data: {args.synthetic_input_files}"
+            )
         else:
             args.prompt_source = ic.PromptSource.FILE
             logger.debug(f"Input source is the following path: {args.input_file}")
@@ -376,10 +378,10 @@ def file_or_directory(value: str) -> Path:
     if value.startswith("synthetic:"):
         return Path(value)
     else:
-        path = Path(value)
-        if path.is_file() or path.is_dir:
-            return path
-
+        path = Path(value)
+        if path.is_file() or path.is_dir():
+            return path
+
     raise ValueError(f"'{value}' is not a valid file or directory")
 
 
@@ -439,7 +441,7 @@ def _add_input_args(parser):
         "comma-separated list of filenames. The synthetic filenames should "
         "not have extensions. For example, 'synthetic:queries,passages'. "
         "Each line should be a JSON object with a 'text' or 'image' field "
-        "in JSONL format. Example: {\"text\": \"Your prompt here\"}",
+        'in JSONL format. Example: {"text": "Your prompt here"}',
     )
 
     input_group.add_argument(
diff --git a/genai-perf/pyproject.toml b/genai-perf/pyproject.toml
index 46e19686..3cf73057 100644
--- a/genai-perf/pyproject.toml
+++ b/genai-perf/pyproject.toml
@@ -37,14 +37,13 @@ classifiers = [
     "Topic :: Scientific/Engineering",
     "Programming Language :: Python",
     "Programming Language :: Python :: 3",
-    "Programming Language :: Python :: 3.8",
     "Programming Language :: Python :: 3.10",
     "Operating System :: Unix",
 ]
 authors = []
 maintainers = []
 keywords = []
-requires-python = ">=3.8,<4"
+requires-python = ">=3.10,<4"
 dependencies = [
     "numpy<2",
     "pytest",
diff --git a/genai-perf/tests/test_cli.py b/genai-perf/tests/test_cli.py
index b5805bc7..1214ca4e 100644
--- a/genai-perf/tests/test_cli.py
+++ b/genai-perf/tests/test_cli.py
@@ -62,7 +62,7 @@ def test_help_version_arguments_output_and_exit(
         monkeypatch.setattr("sys.argv", ["genai-perf"] + args)
 
         with pytest.raises(SystemExit) as excinfo:
-            _ = parser.parse_args()
+            parser.parse_args()
 
         # Check that the exit was successful
         assert excinfo.value.code == 0
@@ -324,7 +324,7 @@ def test_multiple_model_args(
             assert getattr(args, key) == value
 
     def test_file_flags_parsed(self, monkeypatch, mocker):
-        _ = mocker.patch("os.path.isfile", return_value=True)
+        mocker.patch.object(Path, "is_file", return_value=True)
         combined_args = [
             "genai-perf",
             "profile",
@@ -736,7 +736,7 @@ def test_repeated_extra_arg_warning(self, monkeypatch, args, expected_error):
         parsed_args, _ = parser.parse_args()
 
         with pytest.raises(ValueError) as exc_info:
-            _ = parser.get_extra_inputs_as_dict(parsed_args)
+            parser.get_extra_inputs_as_dict(parsed_args)
 
         assert str(exc_info.value) == expected_error
 
@@ -754,7 +754,7 @@ def test_goodput_args_warning(self, monkeypatch, args, expected_error):
         monkeypatch.setattr("sys.argv", combined_args)
 
         with pytest.raises(ValueError) as exc_info:
-            parsed_args, _ = parser.parse_args()
+            parser.parse_args()
 
         assert str(exc_info.value) == expected_error
 
@@ -772,9 +772,7 @@ def test_goodput_args_warning(self, monkeypatch, args, expected_error):
     def test_inferred_prompt_source(
         self, monkeypatch, mocker, args, expected_prompt_source
     ):
-        _ = mocker.patch("builtins.open", mocker.mock_open(read_data="data"))
-        _ = mocker.patch("os.path.isfile", return_value=True)
-        _ = mocker.patch("os.path.isdir", return_value=True)
+        mocker.patch.object(Path, "is_file", return_value=True)
         combined_args = ["genai-perf", "profile", "--model", "test_model"] + args
         monkeypatch.setattr("sys.argv", combined_args)
         args, _ = parser.parse_args()
@@ -822,7 +820,7 @@ def test_compare_help_arguments_output_and_exit(
         monkeypatch.setattr("sys.argv", ["genai-perf", "compare"] + args)
 
         with pytest.raises(SystemExit) as excinfo:
-            _ = parser.parse_args()
+            parser.parse_args()
 
         # Check that the exit was successful
         assert excinfo.value.code == 0
diff --git a/genai-perf/tests/test_embeddings_converter.py b/genai-perf/tests/test_embeddings_converter.py
index 68a6d9a7..c24084f3 100644
--- a/genai-perf/tests/test_embeddings_converter.py
+++ b/genai-perf/tests/test_embeddings_converter.py
@@ -24,12 +24,16 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import pytest
+from genai_perf.exceptions import GenAIPerfException
 from genai_perf.inputs.converters import OpenAIEmbeddingsConverter
 from genai_perf.inputs.input_constants import ModelSelectionStrategy, OutputFormat
 from genai_perf.inputs.inputs_config import InputsConfig
-from genai_perf.exceptions import GenAIPerfException
-from genai_perf.inputs.retrievers.generic_dataset import GenericDataset, DataRow, FileData
-import pytest
+from genai_perf.inputs.retrievers.generic_dataset import (
+    DataRow,
+    FileData,
+    GenericDataset,
+)
 
 
 class TestOpenAIEmbeddingsConverter:
@@ -43,14 +47,17 @@ def test_check_config_streaming_unsupported(self):
         with pytest.raises(GenAIPerfException) as exc_info:
             converter.check_config(config)
 
-        assert str(exc_info.value) == "The --streaming option is not supported for openai_embeddings."
+        assert (
+            str(exc_info.value)
+            == "The --streaming option is not supported for openai_embeddings."
+        )
 
     def test_convert_default(self):
         generic_dataset = GenericDataset(
             files_data={
                 "file1": FileData(
                     filename="file1",
-                    rows=[DataRow(texts=["text_1"]), DataRow(texts=["text_2"])]
+                    rows=[DataRow(texts=["text_1"]), DataRow(texts=["text_2"])],
                 )
             }
         )
@@ -92,7 +99,10 @@ def test_convert_batched(self):
             files_data={
                 "file1": FileData(
                     filename="file1",
-                    rows=[DataRow(texts=["text_1", "text_2"]), DataRow(texts=["text_3", "text_4"])]
+                    rows=[
+                        DataRow(texts=["text_1", "text_2"]),
+                        DataRow(texts=["text_3", "text_4"]),
+                    ],
                 )
             }
         )
@@ -136,7 +146,7 @@ def test_convert_with_request_parameters(self):
             files_data={
                 "file1": FileData(
                     filename="file1",
-                    rows=[DataRow(texts=["text_1"]), DataRow(texts=["text_2"])]
+                    rows=[DataRow(texts=["text_1"]), DataRow(texts=["text_2"])],
                 )
             }
         )
diff --git a/genai-perf/tests/test_file_input_retriever.py b/genai-perf/tests/test_file_input_retriever.py
index ecbd8e30..89d12e00 100644
--- a/genai-perf/tests/test_file_input_retriever.py
+++ b/genai-perf/tests/test_file_input_retriever.py
@@ -26,6 +26,7 @@
 
 from pathlib import Path
 from unittest.mock import mock_open, patch
+
 import pytest
 from genai_perf.inputs.input_constants import ModelSelectionStrategy
 from genai_perf.inputs.inputs_config import InputsConfig
@@ -72,15 +73,23 @@ def open_side_effect(filepath, *args, **kwargs):
         return mock_open(read_data=file_contents.get(filename))()
 
     @staticmethod
-    @patch("genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever._encode_image", return_value="mock_base64_image")
+    @patch(
+        "genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever._encode_image",
+        return_value="mock_base64_image",
+    )
     def mock_encode_image(mock_encode_image):
         return mock_encode_image
-
+
     @patch("pathlib.Path.exists", return_value=True)
     @patch("PIL.Image.open", return_value=Image.new("RGB", (10, 10)))
-    @patch("genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever._encode_image", return_value="mock_base64_image")
+    @patch(
+        "genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever._encode_image",
+        return_value="mock_base64_image",
+    )
     @patch("builtins.open", side_effect=open_side_effect)
-    def test_get_input_file_single_image(self, mock_file, mock_image, mock_encode_image, mock_exists):
+    def test_get_input_file_single_image(
+        self, mock_file, mock_image, mock_encode_image, mock_exists
+    ):
         file_retriever = FileInputRetriever(
             InputsConfig(
                 model_name=["test_model_A"],
@@ -88,7 +97,9 @@ def test_get_input_file_single_image(self, mock_file, mock_image, mock_encode_im
                 input_filename=Path("single_image.jsonl"),
             )
         )
-        file_data = file_retriever._get_input_dataset_from_file(Path("single_image.jsonl"))
+        file_data = file_retriever._get_input_dataset_from_file(
+            Path("single_image.jsonl")
+        )
 
         assert file_data is not None
         assert file_data.filename == "single_image.jsonl"
@@ -97,9 +108,14 @@
 
     @patch("pathlib.Path.exists", return_value=True)
     @patch("PIL.Image.open", return_value=Image.new("RGB", (10, 10)))
-    @patch("genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever._encode_image", side_effect=["mock_base64_image1", "mock_base64_image2", "mock_base64_image3"])
+    @patch(
+        "genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever._encode_image",
+        side_effect=["mock_base64_image1", "mock_base64_image2", "mock_base64_image3"],
+    )
     @patch("builtins.open", side_effect=open_side_effect)
-    def test_get_input_file_multiple_images(self, mock_file, mock_image_open, mock_encode_image, mock_exists):
+    def test_get_input_file_multiple_images(
+        self, mock_file, mock_image_open, mock_encode_image, mock_exists
+    ):
         file_retriever = FileInputRetriever(
             InputsConfig(
                 model_name=["test_model_A"],
@@ -107,12 +123,18 @@
                 input_filename=Path("multiple_images.jsonl"),
             )
         )
-        file_data = file_retriever._get_input_dataset_from_file(Path("multiple_images.jsonl"))
+        file_data = file_retriever._get_input_dataset_from_file(
+            Path("multiple_images.jsonl")
+        )
 
         assert file_data is not None
         assert file_data.filename == "multiple_images.jsonl"
         assert len(file_data.rows) == 3
-        expected_images = ["mock_base64_image1", "mock_base64_image2", "mock_base64_image3"]
+        expected_images = [
+            "mock_base64_image1",
+            "mock_base64_image2",
+            "mock_base64_image3",
+        ]
         for i, image in enumerate(expected_images):
             assert file_data.rows[i].images[0] == image
 
@@ -126,7 +148,9 @@ def test_get_input_file_single_prompt(self, mock_file, mock_exists):
                 input_filename=Path("single_prompt.jsonl"),
             )
         )
-        file_data = file_retriever._get_input_dataset_from_file(Path("single_prompt.jsonl"))
+        file_data = file_retriever._get_input_dataset_from_file(
+            Path("single_prompt.jsonl")
+        )
 
         assert file_data is not None
         assert file_data.filename == "single_prompt.jsonl"
@@ -143,7 +167,9 @@ def test_get_input_file_multiple_prompts(self, mock_file, mock_exists):
                 input_filename=Path("multiple_prompts.jsonl"),
             )
         )
-        file_data = file_retriever._get_input_dataset_from_file(Path("multiple_prompts.jsonl"))
+        file_data = file_retriever._get_input_dataset_from_file(
+            Path("multiple_prompts.jsonl")
+        )
 
         assert file_data is not None
         assert file_data.filename == "multiple_prompts.jsonl"
@@ -151,16 +177,21 @@
         expected_prompts = [
             "What is the capital of France?",
             "Who wrote 1984?",
-            "What is quantum computing?"
+ "What is quantum computing?", ] for i, prompt in enumerate(expected_prompts): assert file_data.rows[i].texts[0] == prompt @patch("pathlib.Path.exists", return_value=True) @patch("PIL.Image.open", return_value=Image.new("RGB", (10, 10))) - @patch("genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever._encode_image", return_value="mock_base64_image") + @patch( + "genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever._encode_image", + return_value="mock_base64_image", + ) @patch("builtins.open", side_effect=open_side_effect) - def test_get_input_file_multi_modal(self, mock_file, mock_image, mock_encode_image, mock_exists): + def test_get_input_file_multi_modal( + self, mock_file, mock_image, mock_encode_image, mock_exists + ): file_retriever = FileInputRetriever( InputsConfig( model_name=["test_model_A"], @@ -168,7 +199,9 @@ def test_get_input_file_multi_modal(self, mock_file, mock_image, mock_encode_ima input_filename=Path("multi_modal.jsonl"), ) ) - file_data = file_retriever._get_input_dataset_from_file(Path("multi_modal.jsonl")) + file_data = file_retriever._get_input_dataset_from_file( + Path("multi_modal.jsonl") + ) assert file_data is not None assert file_data.filename == "multi_modal.jsonl" @@ -186,7 +219,9 @@ def test_get_input_file_deprecated_text_input(self, mock_file, mock_exists): input_filename=Path("deprecated_text_input.jsonl"), ) ) - file_data = file_retriever._get_input_dataset_from_file(Path("deprecated_text_input.jsonl")) + file_data = file_retriever._get_input_dataset_from_file( + Path("deprecated_text_input.jsonl") + ) assert file_data is not None assert file_data.filename == "deprecated_text_input.jsonl" @@ -205,9 +240,11 @@ def test_get_input_file_conflicting_key(self, mock_file, mock_exists): input_filename=Path("conflicting_key.jsonl"), ) ) - with pytest.raises(ValueError, match="Each data entry must have only one of 'text_input' or 'text' key name."): + with pytest.raises( + ValueError, + match="Each data entry must have only one of 'text_input' or 'text' key name.", + ): file_retriever._get_input_dataset_from_file(Path("conflicting_key.jsonl")) - def test_get_input_file_without_file_existing(self): file_retriever = FileInputRetriever( @@ -219,21 +256,41 @@ def test_get_input_file_without_file_existing(self): @patch("pathlib.Path.exists", return_value=True) @patch("pathlib.Path.is_dir", return_value=True) @patch("pathlib.Path.glob", return_value=[]) - def test_get_input_datasets_from_dir_no_jsonl_files(self, mock_exists, mock_is_dir, mock_glob): + def test_get_input_datasets_from_dir_no_jsonl_files( + self, mock_exists, mock_is_dir, mock_glob + ): file_retriever = FileInputRetriever( InputsConfig(input_filename=Path("empty_dir")) ) with pytest.raises(ValueError, match="No JSONL files found in directory"): _ = file_retriever._get_input_datasets_from_dir() - @patch("pathlib.Path.exists", return_value=True) @patch("pathlib.Path.is_dir", return_value=True) - @patch("pathlib.Path.glob", return_value=[Path("single_prompt.jsonl"), Path("multiple_prompts.jsonl"), Path("single_image.jsonl"), Path("multi_modal.jsonl")]) + @patch( + "pathlib.Path.glob", + return_value=[ + Path("single_prompt.jsonl"), + Path("multiple_prompts.jsonl"), + Path("single_image.jsonl"), + Path("multi_modal.jsonl"), + ], + ) @patch("PIL.Image.open", return_value=Image.new("RGB", (10, 10))) - @patch("genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever._encode_image", return_value="mock_base64_image") + @patch( + 
"genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever._encode_image", + return_value="mock_base64_image", + ) @patch("builtins.open", side_effect=open_side_effect) - def test_get_input_datasets_from_dir(self, mock_file, mock_image_open, mock_encode_image, mock_glob, mock_is_dir, mock_exists): + def test_get_input_datasets_from_dir( + self, + mock_file, + mock_image_open, + mock_encode_image, + mock_glob, + mock_is_dir, + mock_exists, + ): file_retriever = FileInputRetriever( InputsConfig( model_name=["test_model_A"], @@ -241,26 +298,28 @@ def test_get_input_datasets_from_dir(self, mock_file, mock_image_open, mock_enco input_filename=Path("test_dir"), ) ) - + file_data = file_retriever._get_input_datasets_from_dir() - + assert len(file_data) == 4 assert file_data["single_prompt"].filename == "single_prompt.jsonl" assert len(file_data["single_prompt"].rows) == 1 - assert file_data["single_prompt"].rows[0].texts[0] == "What is the capital of France?" + assert ( + file_data["single_prompt"].rows[0].texts[0] + == "What is the capital of France?" + ) assert file_data["multiple_prompts"].filename == "multiple_prompts.jsonl" assert len(file_data["multiple_prompts"].rows) == 3 expected_prompts = [ "What is the capital of France?", "Who wrote 1984?", - "What is quantum computing?" + "What is quantum computing?", ] for i, prompt in enumerate(expected_prompts): assert file_data["multiple_prompts"].rows[i].texts[0] == prompt - assert file_data["single_image"].filename == "single_image.jsonl" assert len(file_data["single_image"].rows) == 1 assert file_data["single_image"].rows[0].images[0] == "mock_base64_image" @@ -273,7 +332,9 @@ def test_get_input_datasets_from_dir(self, mock_file, mock_image_open, mock_enco @patch("pathlib.Path.exists", return_value=True) @patch("pathlib.Path.is_dir", return_value=True) @patch("pathlib.Path.glob", return_value=[]) - def test_get_input_datasets_from_empty_dir(self, mock_exists, mock_is_dir, mock_glob): + def test_get_input_datasets_from_empty_dir( + self, mock_exists, mock_is_dir, mock_glob + ): file_retriever = FileInputRetriever( InputsConfig(input_filename=Path("empty_dir")) ) diff --git a/genai-perf/tests/test_input_retriever_factory.py b/genai-perf/tests/test_input_retriever_factory.py index a6edaa1e..cc0cff01 100644 --- a/genai-perf/tests/test_input_retriever_factory.py +++ b/genai-perf/tests/test_input_retriever_factory.py @@ -12,29 +12,43 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from pathlib import Path
 from unittest.mock import patch
+
 from genai_perf.inputs.input_constants import PromptSource
 from genai_perf.inputs.inputs_config import InputsConfig
+from genai_perf.inputs.retrievers.file_input_retriever import FileInputRetriever
 from genai_perf.inputs.retrievers.input_retriever_factory import InputRetrieverFactory
 from genai_perf.inputs.retrievers.synthetic_data_retriever import SyntheticDataRetriever
-from genai_perf.inputs.retrievers.file_input_retriever import FileInputRetriever
-from pathlib import Path
+
 
 class TestInputRetrieverFactory:
     def test_create_file_retrieverg(self):
-        config = InputsConfig(input_type=PromptSource.FILE, input_filename=Path("input_data.jsonl"))
-        with patch("genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever.__init__", return_value=None) as mock_init:
+        config = InputsConfig(
+            input_type=PromptSource.FILE, input_filename=Path("input_data.jsonl")
+        )
+        with patch(
+            "genai_perf.inputs.retrievers.file_input_retriever.FileInputRetriever.__init__",
+            return_value=None,
+        ) as mock_init:
             retriever = InputRetrieverFactory.create(config)
             mock_init.assert_called_once_with(config)
-        assert isinstance(retriever, FileInputRetriever), "Should return a FileInputRetriever"
+        assert isinstance(
+            retriever, FileInputRetriever
+        ), "Should return a FileInputRetriever"
 
     def test_create_synthetic_retriever(self):
         """
         Test that SyntheticDataRetriever is created and passed the correct config.
         """
         config = InputsConfig(input_type=PromptSource.SYNTHETIC, num_prompts=10)
-        with patch("genai_perf.inputs.retrievers.synthetic_data_retriever.SyntheticDataRetriever.__init__", return_value=None) as mock_init:
+        with patch(
+            "genai_perf.inputs.retrievers.synthetic_data_retriever.SyntheticDataRetriever.__init__",
+            return_value=None,
+        ) as mock_init:
             retriever = InputRetrieverFactory.create(config)
             mock_init.assert_called_once_with(config)
-        assert isinstance(retriever, SyntheticDataRetriever), "Should return a SyntheticDataRetriever"
+        assert isinstance(
+            retriever, SyntheticDataRetriever
+        ), "Should return a SyntheticDataRetriever"
diff --git a/genai-perf/tests/test_inputs.py b/genai-perf/tests/test_inputs.py
index 7f5ff962..3c698207 100644
--- a/genai-perf/tests/test_inputs.py
+++ b/genai-perf/tests/test_inputs.py
@@ -12,24 +12,38 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from unittest.mock import patch, mock_open
+import json
+from pathlib import Path
+from unittest.mock import MagicMock, mock_open, patch
+
 from genai_perf.inputs.input_constants import OutputFormat
 from genai_perf.inputs.inputs import Inputs
 from genai_perf.inputs.inputs_config import InputsConfig
-from genai_perf.inputs.retrievers.generic_dataset import GenericDataset, DataRow, FileData
-from pathlib import Path
-import json
-from unittest.mock import MagicMock
+from genai_perf.inputs.retrievers.generic_dataset import (
+    DataRow,
+    FileData,
+    GenericDataset,
+)
 from genai_perf.tokenizer import Tokenizer
 
+
 class TestInputs:
-
+
     @patch("genai_perf.inputs.inputs.InputRetrieverFactory.create")
     @patch("genai_perf.inputs.inputs.OutputFormatConverterFactory.create")
     @patch("builtins.open", new_callable=mock_open)  # Mock the file writing
-    @patch.object(Tokenizer, 'encode', return_value=[1243, 1881, 697])  # Mock Tokenizer encode method
+    @patch.object(
+        Tokenizer, "encode", return_value=[1243, 1881, 697]
+    )  # Mock Tokenizer encode method
     @patch("genai_perf.tokenizer.get_tokenizer")  # Mock the get_tokenizer function
-    def test_data_retrieval_and_conversion(self, mock_get_tokenizer, mock_encode, mock_open_fn, mock_converter_factory, mock_retriever_factory):
+    def test_data_retrieval_and_conversion(
+        self,
+        mock_get_tokenizer,
+        mock_encode,
+        mock_open_fn,
+        mock_converter_factory,
+        mock_retriever_factory,
+    ):
         mock_tokenizer = MagicMock(spec=Tokenizer)
         mock_get_tokenizer.return_value = mock_tokenizer
 
@@ -37,7 +51,7 @@
             files_data={
                 "file1.jsonl": FileData(
                     filename="file1.jsonl",
-                    rows=[DataRow(texts=["test input"], images=[])]
+                    rows=[DataRow(texts=["test input"], images=[])],
                 )
             }
         )
@@ -59,14 +73,24 @@
         mock_retriever_factory.return_value.retrieve_data.assert_called_once()
         mock_converter.convert.assert_called_once_with(generic_dataset, inputs.config)
 
-        mock_open_fn.assert_called_once_with(str(inputs.config.output_dir / "inputs.json"), "w")
-        mock_open_fn().write.assert_called_once_with(json.dumps(expected_output, indent=2))
+        mock_open_fn.assert_called_once_with(
+            str(inputs.config.output_dir / "inputs.json"), "w"
+        )
+        mock_open_fn().write.assert_called_once_with(
+            json.dumps(expected_output, indent=2)
+        )
 
     @patch("genai_perf.inputs.inputs.InputRetrieverFactory.create")
     @patch("genai_perf.inputs.inputs.OutputFormatConverterFactory.create")
     @patch("builtins.open", new_callable=mock_open)
     @patch("genai_perf.tokenizer.get_tokenizer")
-    def test_write_json_to_file(self, mock_get_tokenizer, mock_open_fn, mock_converter_factory, mock_retriever_factory):
+    def test_write_json_to_file(
+        self,
+        mock_get_tokenizer,
+        mock_open_fn,
+        mock_converter_factory,
+        mock_retriever_factory,
+    ):
         mock_tokenizer = MagicMock(spec=Tokenizer)
         mock_get_tokenizer.return_value = mock_tokenizer
 
@@ -74,7 +98,7 @@
             files_data={
                 "file1.jsonl": FileData(
                     filename="file1.jsonl",
-                    rows=[DataRow(texts=["test input one"], images=[])]
+                    rows=[DataRow(texts=["test input one"], images=[])],
                 )
             }
         )
@@ -82,7 +106,6 @@
         mock_retriever_factory.return_value.retrieve_data.return_value = generic_dataset
         expected_output = {"data": "some converted data"}
         mock_converter_factory.return_value.convert.return_value = expected_output
-
 
         inputs = Inputs(
             InputsConfig(
@@ -95,7 +118,13 @@
         inputs.create_inputs()
 
         mock_retriever_factory.return_value.retrieve_data.assert_called_once()
-        mock_converter_factory.return_value.convert.assert_called_once_with(generic_dataset, inputs.config)
+        mock_converter_factory.return_value.convert.assert_called_once_with(
+            generic_dataset, inputs.config
+        )
 
-        mock_open_fn.assert_called_once_with(str(inputs.config.output_dir / "inputs.json"), "w")
-        mock_open_fn().write.assert_called_once_with(json.dumps(expected_output, indent=2))
+        mock_open_fn.assert_called_once_with(
+            str(inputs.config.output_dir / "inputs.json"), "w"
+        )
+        mock_open_fn().write.assert_called_once_with(
+            json.dumps(expected_output, indent=2)
+        )
diff --git a/genai-perf/tests/test_openai_chat_converter.py b/genai-perf/tests/test_openai_chat_converter.py
index cd9c5828..f63e7c78 100644
--- a/genai-perf/tests/test_openai_chat_converter.py
+++ b/genai-perf/tests/test_openai_chat_converter.py
@@ -24,12 +24,17 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+from typing import Any, Dict, List
+
 import pytest
-from typing import Any, List, Dict
 from genai_perf.inputs.converters import OpenAIChatCompletionsConverter
 from genai_perf.inputs.input_constants import ModelSelectionStrategy, OutputFormat
 from genai_perf.inputs.inputs_config import InputsConfig
-from genai_perf.inputs.retrievers.generic_dataset import GenericDataset, FileData, DataRow
+from genai_perf.inputs.retrievers.generic_dataset import (
+    DataRow,
+    FileData,
+    GenericDataset,
+)
 
 
 class TestOpenAIChatCompletionsConverter:
@@ -57,15 +62,13 @@ def clean_image(row):
                 "file1": FileData(
                     filename="file1",
                     rows=[
-                        DataRow(
-                            texts=clean_text(row),
-                            images=clean_image(row)
-                        )
+                        DataRow(texts=clean_text(row), images=clean_image(row))
                         for row in rows
-                    ]
+                    ],
                 )
             }
         )
+
     def test_convert_default(self):
         generic_dataset = self.create_generic_dataset(
             [{"text": "text input one"}, {"text": "text input two"}]
diff --git a/genai-perf/tests/test_openai_completions_converter.py b/genai-perf/tests/test_openai_completions_converter.py
index 521709c8..52fd211d 100644
--- a/genai-perf/tests/test_openai_completions_converter.py
+++ b/genai-perf/tests/test_openai_completions_converter.py
@@ -25,9 +25,17 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
from genai_perf.inputs.converters import OpenAICompletionsConverter -from genai_perf.inputs.input_constants import ModelSelectionStrategy, OutputFormat, DEFAULT_OUTPUT_TOKENS_MEAN +from genai_perf.inputs.input_constants import ( + DEFAULT_OUTPUT_TOKENS_MEAN, + ModelSelectionStrategy, + OutputFormat, +) from genai_perf.inputs.inputs_config import InputsConfig -from genai_perf.inputs.retrievers.generic_dataset import GenericDataset, FileData, DataRow +from genai_perf.inputs.retrievers.generic_dataset import ( + DataRow, + FileData, + GenericDataset, +) class TestOpenAICompletionsConverter: @@ -41,8 +49,8 @@ def create_generic_dataset() -> GenericDataset: filename="file1", rows=[ DataRow(texts=["text input one"]), - DataRow(texts=["text input two"]) - ] + DataRow(texts=["text input two"]), + ], ) } ) diff --git a/genai-perf/tests/test_rankings_converter.py b/genai-perf/tests/test_rankings_converter.py index 32278e9c..0bc26083 100644 --- a/genai-perf/tests/test_rankings_converter.py +++ b/genai-perf/tests/test_rankings_converter.py @@ -24,12 +24,17 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from typing import List, Optional + import pytest from genai_perf.inputs.converters import RankingsConverter from genai_perf.inputs.input_constants import ModelSelectionStrategy, OutputFormat from genai_perf.inputs.inputs_config import InputsConfig -from genai_perf.inputs.retrievers.generic_dataset import GenericDataset, DataRow, FileData -from typing import List, Optional +from genai_perf.inputs.retrievers.generic_dataset import ( + DataRow, + FileData, + GenericDataset, +) class TestRankingsConverter: @@ -37,20 +42,20 @@ class TestRankingsConverter: @staticmethod def create_generic_dataset( queries_data: Optional[List[List[str]]] = None, - passages_data: Optional[List[List[str]]] = None + passages_data: Optional[List[List[str]]] = None, ) -> GenericDataset: files_data = {} if queries_data is not None: files_data["queries"] = FileData( filename="queries", - rows=[DataRow(texts=query) for query in queries_data] + rows=[DataRow(texts=query) for query in queries_data], ) if passages_data is not None: files_data["passages"] = FileData( filename="passages", - rows=[DataRow(texts=passage) for passage in passages_data] + rows=[DataRow(texts=passage) for passage in passages_data], ) return GenericDataset(files_data=files_data) @@ -58,7 +63,7 @@ def create_generic_dataset( def test_convert_default(self): generic_dataset = self.create_generic_dataset( queries_data=[["query 1"], ["query 2"]], - passages_data=[["passage 1", "passage 2"], ["passage 3", "passage 4"]] + passages_data=[["passage 1", "passage 2"], ["passage 3", "passage 4"]], ) config = InputsConfig( @@ -77,7 +82,10 @@ def test_convert_default(self): "payload": [ { "query": "query 1", - "passages": [{"text_input": "passage 1"}, {"text_input": "passage 2"}], + "passages": [ + {"text_input": "passage 1"}, + {"text_input": "passage 2"}, + ], "model": "test_model", } ] @@ -86,7 +94,10 @@ def test_convert_default(self): "payload": [ { "query": "query 2", - "passages": [{"text_input": "passage 3"}, {"text_input": "passage 4"}], + "passages": [ + {"text_input": "passage 3"}, + {"text_input": "passage 4"}, + ], "model": "test_model", } ] @@ -99,7 +110,7 @@ def test_convert_default(self): def test_convert_with_request_parameters(self): generic_dataset = self.create_generic_dataset( queries_data=[["query 1"], ["query 2"]], - passages_data=[["passage 1", "passage 
2"], ["passage 3", "passage 4"]] + passages_data=[["passage 1", "passage 2"], ["passage 3", "passage 4"]], ) extra_inputs = { @@ -124,7 +135,10 @@ def test_convert_with_request_parameters(self): "payload": [ { "query": "query 1", - "passages": [{"text_input": "passage 1"}, {"text_input": "passage 2"}], + "passages": [ + {"text_input": "passage 1"}, + {"text_input": "passage 2"}, + ], "model": "test_model", "encoding_format": "base64", "truncate": "END", @@ -136,7 +150,10 @@ def test_convert_with_request_parameters(self): "payload": [ { "query": "query 2", - "passages": [{"text_input": "passage 3"}, {"text_input": "passage 4"}], + "passages": [ + {"text_input": "passage 3"}, + {"text_input": "passage 4"}, + ], "model": "test_model", "encoding_format": "base64", "truncate": "END", @@ -152,7 +169,7 @@ def test_convert_with_request_parameters(self): def test_convert_huggingface_tei(self): generic_dataset = self.create_generic_dataset( queries_data=[["query 1"], ["query 2"]], - passages_data=[["passage 1", "passage 2"], ["passage 3", "passage 4"]] + passages_data=[["passage 1", "passage 2"], ["passage 3", "passage 4"]], ) extra_inputs = { @@ -198,15 +215,26 @@ def test_convert_huggingface_tei(self): @pytest.mark.parametrize( "queries_data, passages_data, expected_error", [ - (None, [["passage 1"], ["passage 2"]], "Both 'queries.jsonl' and 'passages.jsonl' must be present in the input datasets."), - ([["query 1"], ["query 2"]], None, "Both 'queries.jsonl' and 'passages.jsonl' must be present in the input datasets."), - (None, None, "Both 'queries.jsonl' and 'passages.jsonl' must be present in the input datasets."), - ] + ( + None, + [["passage 1"], ["passage 2"]], + "Both 'queries.jsonl' and 'passages.jsonl' must be present in the input datasets.", + ), + ( + [["query 1"], ["query 2"]], + None, + "Both 'queries.jsonl' and 'passages.jsonl' must be present in the input datasets.", + ), + ( + None, + None, + "Both 'queries.jsonl' and 'passages.jsonl' must be present in the input datasets.", + ), + ], ) def test_convert_missing_files(self, queries_data, passages_data, expected_error): generic_dataset = self.create_generic_dataset( - queries_data=queries_data, - passages_data=passages_data + queries_data=queries_data, passages_data=passages_data ) config = InputsConfig( @@ -220,7 +248,7 @@ def test_convert_missing_files(self, queries_data, passages_data, expected_error with pytest.raises(ValueError) as excinfo: rankings_converter.convert(generic_dataset, config) - + assert str(excinfo.value) == expected_error @pytest.mark.parametrize( @@ -249,9 +277,9 @@ def test_convert_missing_files(self, queries_data, passages_data, expected_error "model": "test_model", } ] - } + }, ] - } + }, ), # More passages than queries ( @@ -269,14 +297,15 @@ def test_convert_missing_files(self, queries_data, passages_data, expected_error ] } ] - } - ) - ] + }, + ), + ], ) - def test_convert_mismatched_queries_and_passages(self, queries_data, passages_data, expected_result): + def test_convert_mismatched_queries_and_passages( + self, queries_data, passages_data, expected_result + ): generic_dataset = self.create_generic_dataset( - queries_data=queries_data, - passages_data=passages_data + queries_data=queries_data, passages_data=passages_data ) config = InputsConfig( diff --git a/genai-perf/tests/test_synthetic_data_retriever.py b/genai-perf/tests/test_synthetic_data_retriever.py index d6306274..f9fc69c2 100644 --- a/genai-perf/tests/test_synthetic_data_retriever.py +++ b/genai-perf/tests/test_synthetic_data_retriever.py @@ 
 from typing import cast
 from unittest.mock import patch
+
 import pytest
+from genai_perf.inputs.input_constants import DEFAULT_SYNTHETIC_FILENAME, OutputFormat
 from genai_perf.inputs.inputs_config import InputsConfig
-from genai_perf.inputs.retrievers.synthetic_data_retriever import SyntheticDataRetriever
-from genai_perf.inputs.input_constants import OutputFormat, DEFAULT_SYNTHETIC_FILENAME
 from genai_perf.inputs.retrievers.generic_dataset import GenericDataset
+from genai_perf.inputs.retrievers.synthetic_data_retriever import SyntheticDataRetriever
 
 
 class TestSyntheticDataRetriever:
@@ -38,10 +39,10 @@ def test_synthetic_text(self, mock_prompt, batch_size_text, num_prompts):
             num_prompts=num_prompts,
             batch_size_text=batch_size_text,
             output_format=OutputFormat.OPENAI_COMPLETIONS,
-            synthetic_input_filenames=[DEFAULT_SYNTHETIC_FILENAME]
+            synthetic_input_filenames=[DEFAULT_SYNTHETIC_FILENAME],
         )
         synthetic_retriever = SyntheticDataRetriever(config)
-        dataset: GenericDataset = synthetic_retriever.retrieve_data()
+        dataset = synthetic_retriever.retrieve_data()
 
         synthetic_input_filenames = cast(list[str], config.synthetic_input_filenames)
         assert len(dataset.files_data[synthetic_input_filenames[0]].rows) == num_prompts
@@ -73,10 +74,10 @@ def test_synthetic_text_and_image(
             batch_size_text=batch_size_text,
             batch_size_image=batch_size_image,
             output_format=OutputFormat.OPENAI_VISION,
-            synthetic_input_filenames=[DEFAULT_SYNTHETIC_FILENAME]
+            synthetic_input_filenames=[DEFAULT_SYNTHETIC_FILENAME],
         )
         synthetic_retriever = SyntheticDataRetriever(config)
-        dataset: GenericDataset = synthetic_retriever.retrieve_data()
+        dataset = synthetic_retriever.retrieve_data()
 
         synthetic_input_filenames = cast(list[str], config.synthetic_input_filenames)
         assert len(dataset.files_data[synthetic_input_filenames[0]].rows) == num_prompts
@@ -85,7 +86,10 @@ def test_synthetic_text_and_image(
             assert len(row.texts) == batch_size_text
             assert len(row.images) == batch_size_image
             assert all(text == "test prompt" for text in row.texts)
-            assert all(image == "data:image/jpeg;base64,test_base64_encoding" for image in row.images)
+            assert all(
+                image == "data:image/jpeg;base64,test_base64_encoding"
+                for image in row.images
+            )
 
     @patch(
         "genai_perf.inputs.retrievers.synthetic_data_retriever.SyntheticPromptGenerator.create_synthetic_prompt",
@@ -107,7 +111,7 @@ def test_synthetic_multiple_files(self, mock_prompt, mock_image):
             output_format=OutputFormat.OPENAI_VISION,
         )
         synthetic_retriever = SyntheticDataRetriever(config)
-        dataset: GenericDataset = synthetic_retriever.retrieve_data()
+        dataset = synthetic_retriever.retrieve_data()
 
         assert len(dataset.files_data) == 2
         assert "file1.jsonl" in dataset.files_data
diff --git a/genai-perf/tests/test_tensorrtllm_engine_converter.py b/genai-perf/tests/test_tensorrtllm_engine_converter.py
index 73b4e3f3..82d97e7f 100644
--- a/genai-perf/tests/test_tensorrtllm_engine_converter.py
+++ b/genai-perf/tests/test_tensorrtllm_engine_converter.py
@@ -25,6 +25,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 import pytest
+from genai_perf.exceptions import GenAIPerfException
 from genai_perf.inputs.converters import TensorRTLLMEngineConverter
 from genai_perf.inputs.input_constants import (
     DEFAULT_TENSORRTLLM_MAX_TOKENS,
@@ -32,9 +33,12 @@
     OutputFormat,
 )
 from genai_perf.inputs.inputs_config import InputsConfig
+from genai_perf.inputs.retrievers.generic_dataset import (
+    DataRow,
+    FileData,
+    GenericDataset,
+)
 from genai_perf.tokenizer import DEFAULT_TOKENIZER, get_tokenizer
-from genai_perf.inputs.retrievers.generic_dataset import GenericDataset, FileData, DataRow
-from genai_perf.exceptions import GenAIPerfException
 
 
 class TestTensorRTLLMEngineConverter:
@@ -48,8 +52,8 @@ def create_generic_dataset() -> GenericDataset:
                 filename="file1",
                 rows=[
                     DataRow(texts=["text input one"]),
-                    DataRow(texts=["text input two"])
-                ]
+                    DataRow(texts=["text input two"]),
+                ],
             )
         }
     )
diff --git a/genai-perf/tests/test_triton_tensorrtllm_converter.py b/genai-perf/tests/test_triton_tensorrtllm_converter.py
index 35e73c8c..81a50052 100644
--- a/genai-perf/tests/test_triton_tensorrtllm_converter.py
+++ b/genai-perf/tests/test_triton_tensorrtllm_converter.py
@@ -31,7 +31,12 @@
     OutputFormat,
 )
 from genai_perf.inputs.inputs_config import InputsConfig
-from genai_perf.inputs.retrievers.generic_dataset import GenericDataset, DataRow, FileData
+from genai_perf.inputs.retrievers.generic_dataset import (
+    DataRow,
+    FileData,
+    GenericDataset,
+)
+
 
 class TestTensorRTLLMConverter:
@@ -44,8 +49,8 @@ def create_generic_dataset():
                 filename="file1",
                 rows=[
                     DataRow(texts=["text input one"]),
-                    DataRow(texts=["text input two"])
-                ]
+                    DataRow(texts=["text input two"]),
+                ],
             )
         }
     )
diff --git a/genai-perf/tests/test_triton_vllm_converter.py b/genai-perf/tests/test_triton_vllm_converter.py
index 6627ed53..826e5f64 100644
--- a/genai-perf/tests/test_triton_vllm_converter.py
+++ b/genai-perf/tests/test_triton_vllm_converter.py
@@ -25,11 +25,16 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 import pytest
+from genai_perf.exceptions import GenAIPerfException
 from genai_perf.inputs.converters import VLLMConverter
 from genai_perf.inputs.input_constants import ModelSelectionStrategy, OutputFormat
 from genai_perf.inputs.inputs_config import InputsConfig
-from genai_perf.exceptions import GenAIPerfException
-from genai_perf.inputs.retrievers.generic_dataset import GenericDataset, FileData, DataRow
+from genai_perf.inputs.retrievers.generic_dataset import (
+    DataRow,
+    FileData,
+    GenericDataset,
+)
+
 
 class TestVLLMConverter:
@@ -42,12 +47,12 @@ def create_generic_dataset():
                 filename="file1",
                 rows=[
                     DataRow(texts=["text input one"]),
-                    DataRow(texts=["text input two"])
-                ]
+                    DataRow(texts=["text input two"]),
+                ],
             )
         }
     )
-    
+
     def test_convert_default(self):
         generic_dataset = self.create_generic_dataset()
@@ -172,10 +177,10 @@ def test_check_config_invalid_batch_size(self):
         )
 
         vllm_converter = VLLMConverter()
-        
+
         with pytest.raises(GenAIPerfException) as exc_info:
             vllm_converter.check_config(config)
-        
+
         assert str(exc_info.value) == (
             "The --batch-size-text flag is not supported for vllm."
         )
diff --git a/templates/genai-perf-templates/README_template b/templates/genai-perf-templates/README_template
index 67676887..39da14ab 100644
--- a/templates/genai-perf-templates/README_template
+++ b/templates/genai-perf-templates/README_template
@@ -282,7 +282,7 @@ When the dataset is synthetic, you can specify the following options:
 
 When the dataset is coming from a file, you can specify the following options:
 
-* `--input-file <path>`: The input file or directory containing the prompts or 
+* `--input-file <path>`: The input file or directory containing the prompts or
   filepaths to images to use for benchmarking as JSON objects.
 
 For any dataset, you can specify the following options:
@@ -420,7 +420,7 @@ Alternatively, a string representing a json formatted dict can be provided.
 
 ##### `--input-file <path>`
 
-The input file or directory containing the content to use for 
+The input file or directory containing the content to use for
 profiling. To use synthetic files for a converter that needs multiple
 files, prefix the path with 'synthetic:', followed by a comma-separated
 list of filenames. The synthetic filenames should not have