fix naming
IlyasMoutawwakil committed Dec 11, 2024
1 parent 7faf1e6 commit c92db73
Showing 8 changed files with 19 additions and 19 deletions.
6 changes: 3 additions & 3 deletions optimum_benchmark/backends/ipex/backend.py
@@ -9,7 +9,7 @@
 from ..base import Backend
 from ..transformers_utils import fast_weights_init
 from .config import IPEXConfig
-from .utils import TASKS_TO_IPEXMODEL
+from .utils import TASKS_TO_IPEXMODELS

 if is_accelerate_available():
     from accelerate import Accelerator
@@ -24,8 +24,8 @@ class IPEXBackend(Backend[IPEXConfig]):
     def __init__(self, config: IPEXConfig) -> None:
         super().__init__(config)

-        if self.config.task in TASKS_TO_IPEXMODEL:
-            self.ipexmodel_class = get_class(TASKS_TO_IPEXMODEL[self.config.task])
+        if self.config.task in TASKS_TO_IPEXMODELS:
+            self.ipexmodel_class = get_class(TASKS_TO_IPEXMODELS[self.config.task])
             self.logger.info(f"\t+ Using IPEXModel class {self.ipexmodel_class.__name__}")
         else:
             raise NotImplementedError(f"IPEXBackend does not support task {self.config.task}")
2 changes: 1 addition & 1 deletion optimum_benchmark/backends/ipex/utils.py
@@ -1,4 +1,4 @@
-TASKS_TO_IPEXMODEL = {
+TASKS_TO_IPEXMODELS = {
     "fill-mask": "optimum.intel.IPEXModelForMaskedLM",
     "text-generation": "optimum.intel.IPEXModelForCausalLM",
     "feature-extraction": "optimum.intel.IPEXModel",
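Note: every constant renamed in this commit follows the same pattern, a plain dict mapping a task name to a dotted class path, which the backend resolves at runtime with get_class. The sketch below is a minimal re-implementation of that lookup, assuming get_class simply performs a dynamic import by dotted path (the real helper in optimum-benchmark may differ); it reuses the IPEX mapping shown above and needs optimum-intel installed to actually resolve the class.

    import importlib

    # Sketch of a get_class-style helper (assumption: the real get_class
    # resolves a dotted path like "optimum.intel.IPEXModelForCausalLM").
    def get_class(dotted_path: str):
        module_name, _, class_name = dotted_path.rpartition(".")
        return getattr(importlib.import_module(module_name), class_name)

    # Same shape as the renamed TASKS_TO_IPEXMODELS mapping above.
    TASKS_TO_IPEXMODELS = {
        "fill-mask": "optimum.intel.IPEXModelForMaskedLM",
        "text-generation": "optimum.intel.IPEXModelForCausalLM",
        "feature-extraction": "optimum.intel.IPEXModel",
    }

    task = "text-generation"
    if task in TASKS_TO_IPEXMODELS:
        # Requires optimum-intel; resolves to IPEXModelForCausalLM.
        ipexmodel_class = get_class(TASKS_TO_IPEXMODELS[task])
    else:
        raise NotImplementedError(f"IPEXBackend does not support task {task}")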
6 changes: 3 additions & 3 deletions optimum_benchmark/backends/onnxruntime/backend.py
@@ -28,7 +28,7 @@
 from .config import ORTConfig
 from .utils import (
     TASKS_TO_ORTMODELS,
-    TASKS_TO_ORTPIPELINE,
+    TASKS_TO_ORTPIPELINES,
     format_calibration_config,
     format_quantization_config,
 )
@@ -49,8 +49,8 @@ def __init__(self, config: ORTConfig) -> None:
         if self.config.library != "diffusers" and self.config.task in TASKS_TO_ORTMODELS:
             self.ort_model_loader = get_class(TASKS_TO_ORTMODELS[self.config.task])
             self.logger.info(f"Using ORTModel class {self.ort_model_loader.__name__}")
-        elif self.config.library == "diffusers" and self.config.task in TASKS_TO_ORTPIPELINE:
-            self.ort_model_loader = get_class(TASKS_TO_ORTPIPELINE[self.config.task])
+        elif self.config.library == "diffusers" and self.config.task in TASKS_TO_ORTPIPELINES:
+            self.ort_model_loader = get_class(TASKS_TO_ORTPIPELINES[self.config.task])
             self.logger.info(f"Using ORTDiffusionPipeline class {self.ort_model_loader.__name__}")
         else:
             raise NotImplementedError(f"ORTBackend does not support task {self.config.task}")
2 changes: 1 addition & 1 deletion optimum_benchmark/backends/onnxruntime/utils.py
@@ -7,7 +7,7 @@
     task: f"optimum.onnxruntime.{task_dict['class'][0].__name__}" for task, task_dict in ORT_SUPPORTED_TASKS.items()
 }

-TASKS_TO_ORTPIPELINE = {
+TASKS_TO_ORTPIPELINES = {
     "inpainting": "optimum.onnxruntime.ORTPipelineForInpainting",
     "text-to-image": "optimum.onnxruntime.ORTPipelineForText2Image",
     "image-to-image": "optimum.onnxruntime.ORTPipelineForImage2Image",
10 changes: 5 additions & 5 deletions optimum_benchmark/backends/openvino/backend.py
@@ -9,7 +9,7 @@
 from ..base import Backend
 from ..transformers_utils import fast_weights_init
 from .config import OVConfig as OVBackendConfig
-from .utils import TASKS_OVPIPELINE, TASKS_TO_OVMODEL
+from .utils import TASKS_TO_OVMODELS, TASKS_TO_OVPIPELINES

 if is_accelerate_available():
     from accelerate import Accelerator
@@ -24,11 +24,11 @@ class OVBackend(Backend[OVBackendConfig]):
     def __init__(self, config: OVBackendConfig) -> None:
         super().__init__(config)

-        if self.config.task in TASKS_TO_OVMODEL:
-            self.ovmodel_class = get_class(TASKS_TO_OVMODEL[self.config.task])
+        if self.config.library != "diffusers" and self.config.task in TASKS_TO_OVMODELS:
+            self.ovmodel_class = get_class(TASKS_TO_OVMODELS[self.config.task])
             self.logger.info(f"\t+ Using OVModel class {self.ovmodel_class.__name__}")
-        elif self.config.task in TASKS_OVPIPELINE:
-            self.ovmodel_class = get_class(TASKS_OVPIPELINE[self.config.task])
+        elif self.config.library == "diffusers" and self.config.task in TASKS_TO_OVPIPELINES:
+            self.ovmodel_class = get_class(TASKS_TO_OVPIPELINES[self.config.task])
             self.logger.info(f"\t+ Using OVDiffusionPipeline class {self.ovmodel_class.__name__}")
         else:
             raise NotImplementedError(f"OVBackend does not support task {self.config.task}")
4 changes: 2 additions & 2 deletions optimum_benchmark/backends/openvino/utils.py
@@ -1,4 +1,4 @@
-TASKS_TO_OVMODEL = {
+TASKS_TO_OVMODELS = {
     "fill-mask": "optimum.intel.openvino.OVModelForMaskedLM",
     "text-generation": "optimum.intel.openvino.OVModelForCausalLM",
     "text2text-generation": "optimum.intel.openvino.OVModelForSeq2SeqLM",
@@ -10,7 +10,7 @@
     "audio-classification": "optimum.intel.openvino.OVModelForAudioClassification",
     "pix2struct": "optimum.intel.openvino.OVModelForPix2Struct",
 }
-TASKS_OVPIPELINE = {
+TASKS_TO_OVPIPELINES = {
     "inpainting": "optimum.intel.openvino.OVPipelineForInpainting",
     "text-to-image": "optimum.intel.openvino.OVPipelineForText2Image",
     "image-to-image": "optimum.intel.openvino.OVPipelineForImage2Image",
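Note: the OpenVINO backend is the one place this commit changes behavior as well as names: like the ONNX Runtime backend above, it now checks config.library before choosing between the model and pipeline mappings, so a diffusers task can no longer match the transformers-style mapping. A condensed sketch of that dispatch follows; the library and task values are hypothetical stand-ins for the config fields, and the mappings are trimmed to one entry each.

    # Condensed sketch of the new OVBackend dispatch (values are hypothetical).
    library, task = "diffusers", "text-to-image"

    TASKS_TO_OVMODELS = {"text-generation": "optimum.intel.openvino.OVModelForCausalLM"}
    TASKS_TO_OVPIPELINES = {"text-to-image": "optimum.intel.openvino.OVPipelineForText2Image"}

    if library != "diffusers" and task in TASKS_TO_OVMODELS:
        dotted_path = TASKS_TO_OVMODELS[task]      # transformers-style models
    elif library == "diffusers" and task in TASKS_TO_OVPIPELINES:
        dotted_path = TASKS_TO_OVPIPELINES[task]   # diffusion pipelines
    else:
        raise NotImplementedError(f"OVBackend does not support task {task}")

    print(dotted_path)  # optimum.intel.openvino.OVPipelineForText2Image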
6 changes: 3 additions & 3 deletions optimum_benchmark/backends/tensorrt_llm/backend.py
@@ -12,7 +12,7 @@
 from ..base import Backend
 from ..transformers_utils import fast_weights_init
 from .config import TRTLLMConfig
-from .utils import MODEL_TYPE_TO_TRTLLMMODEL
+from .utils import MODEL_TYPE_TO_TRTLLMMODELS


 class TRTLLMBackend(Backend[TRTLLMConfig]):
@@ -21,8 +21,8 @@ class TRTLLMBackend(Backend[TRTLLMConfig]):
     def __init__(self, config: TRTLLMConfig):
         super().__init__(config)

-        if self.config.model_type in MODEL_TYPE_TO_TRTLLMMODEL:
-            self.trtllm_loader = get_class(MODEL_TYPE_TO_TRTLLMMODEL[self.config.model_type])
+        if self.config.model_type in MODEL_TYPE_TO_TRTLLMMODELS:
+            self.trtllm_loader = get_class(MODEL_TYPE_TO_TRTLLMMODELS[self.config.model_type])
             self.logger.info(f"\t+ Using TRTLLMModel class {self.trtllm_loader.__name__}")
         else:
             raise NotImplementedError(f"TRTLLMBackend does not support model_type {self.config.model_type}")
2 changes: 1 addition & 1 deletion optimum_benchmark/backends/tensorrt_llm/utils.py
@@ -1 +1 @@
-MODEL_TYPE_TO_TRTLLMMODEL = {"llama": "optimum.nvidia.models.llama.LlamaForCausalLM"}
+MODEL_TYPE_TO_TRTLLMMODELS = {"llama": "optimum.nvidia.models.llama.LlamaForCausalLM"}
