Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
… into main
  • Loading branch information
IlyasMoutawwakil committed Feb 21, 2024
2 parents 8677dba + 6d39b79 commit 580251a
Show file tree
Hide file tree
Showing 6 changed files with 73 additions and 54 deletions.
20 changes: 12 additions & 8 deletions optimum_benchmark/backends/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,13 @@
from ..task_utils import get_automodel_class_for_task
from .config import BackendConfigT
from .diffusers_utils import extract_diffusers_shapes_from_model, get_diffusers_pretrained_config
from .timm_utils import extract_timm_shapes_from_config, get_timm_pre_processor, get_timm_pretrained_config
from .timm_utils import extract_timm_shapes_from_config, get_timm_pretrained_config
from .transformers_utils import (
PretrainedProcessor,
extract_transformers_shapes_from_artifacts,
get_transformers_generation_config,
get_transformers_pre_processor,
get_transformers_pretrained_config,
get_transformers_pretrained_processor,
)

LOGGER = getLogger("backend")
Expand All @@ -32,7 +32,7 @@ class Backend(Generic[BackendConfigT], ABC):
pretrained_model: PreTrainedModel
pretrained_config: Optional[PretrainedConfig]
generation_config: Optional[GenerationConfig]
pre_processor: Optional[PretrainedProcessor]
pretrained_processor: Optional[PretrainedProcessor]

def __init__(self, config: BackendConfigT):
LOGGER.info(f"َAllocating {self.NAME} backend")
Expand All @@ -43,21 +43,25 @@ def __init__(self, config: BackendConfigT):
self.pretrained_config = get_diffusers_pretrained_config(self.config.model, **self.config.hub_kwargs)
self.model_shapes = extract_diffusers_shapes_from_model(self.config.model, **self.config.hub_kwargs)
self.model_type = self.config.task
self.pretrained_processor = None
self.generation_config = None
self.pre_processor = None

elif self.config.library == "timm":
self.pre_processor = get_timm_pre_processor(self.config.model)
self.pretrained_config = get_timm_pretrained_config(self.config.model)
self.model_shapes = extract_timm_shapes_from_config(config=self.pretrained_config)
self.model_shapes = extract_timm_shapes_from_config(self.pretrained_config)
self.model_type = self.pretrained_config.architecture
self.pretrained_processor = None
self.generation_config = None

else:
self.pre_processor = get_transformers_pre_processor(self.config.model, **self.config.hub_kwargs)
self.pretrained_processor = get_transformers_pretrained_processor(
self.config.model, **self.config.hub_kwargs
)
self.generation_config = get_transformers_generation_config(self.config.model, **self.config.hub_kwargs)
self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.hub_kwargs)
self.model_shapes = extract_transformers_shapes_from_artifacts(self.pretrained_config, self.pre_processor)
self.model_shapes = extract_transformers_shapes_from_artifacts(
self.pretrained_config, self.pretrained_processor
)
self.model_type = self.pretrained_config.model_type

self.automodel_class = get_automodel_class_for_task(
Expand Down
9 changes: 1 addition & 8 deletions optimum_benchmark/backends/pytorch/backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from transformers import Trainer, TrainerCallback, TrainerState, TrainingArguments
from transformers.modeling_utils import no_init_weights

from ...import_utils import is_deepspeed_available, is_peft_available, is_torch_distributed_available
from ...import_utils import is_deepspeed_available, is_peft_available
from ..base import Backend
from ..peft_utils import get_peft_config_class
from ..transformers_utils import randomize_weights
Expand All @@ -22,9 +22,6 @@
if is_peft_available():
from peft import get_peft_model # type: ignore

if is_torch_distributed_available():
import torch.distributed

if is_deepspeed_available():
from deepspeed import init_inference # type: ignore

Expand Down Expand Up @@ -360,10 +357,6 @@ def seed(self):
torch.cuda.manual_seed_all(self.config.seed)

def clean(self) -> None:
if is_torch_distributed_available() and torch.distributed.is_initialized():
LOGGER.info("\t+ Waiting for distributed processes to finish before cleaning backend")
torch.distributed.barrier()

super().clean()

if hasattr(self, "tmpdir"):
Expand Down
2 changes: 1 addition & 1 deletion optimum_benchmark/backends/transformers_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def get_transformers_generation_config(model: str, **kwargs) -> Optional["Genera
return None


def get_transformers_pre_processor(model: str, **kwargs) -> Optional["PretrainedProcessor"]:
def get_transformers_pretrained_processor(model: str, **kwargs) -> Optional["PretrainedProcessor"]:
try:
# sometimes contains information about the model's input shapes that are not available in the config
return AutoProcessor.from_pretrained(model, **kwargs)
Expand Down
8 changes: 5 additions & 3 deletions optimum_benchmark/import_utils.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import importlib.metadata
import importlib.util
import subprocess
from pathlib import Path
from subprocess import STDOUT, check_output
from typing import Optional

_transformers_available = importlib.util.find_spec("transformers") is not None
Expand Down Expand Up @@ -188,12 +189,13 @@ def get_git_revision_hash(package_name: str) -> Optional[str]:
"""

try:
path = importlib.util.find_spec(package_name).origin
path = Path(importlib.util.find_spec(package_name).origin).parent
except Exception:
return None

try:
git_hash = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=path).decode().strip()
git_hash = check_output(["git", "rev-parse", "HEAD"], cwd=path, stderr=STDOUT).strip().decode("utf-8")

except Exception:
return None

Expand Down
6 changes: 5 additions & 1 deletion optimum_benchmark/launchers/torchrun/launcher.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,13 +82,17 @@ def entrypoint(worker, queue, lock, log_level, *worker_args):
"""

rank = int(os.environ["RANK"])
torch.cuda.set_device(rank) if torch.cuda.is_available() else None
setup_logging(level=log_level, prefix=f"RANK-{rank}") if rank == 0 else None

torch.cuda.set_device(rank) if torch.cuda.is_available() else None
torch.distributed.init_process_group(backend="nccl" if torch.cuda.is_available() else "gloo")
torch.distributed.barrier()

output = worker(*worker_args)

torch.distributed.barrier()
torch.distributed.destroy_process_group()

lock.acquire()
queue.put(output)
lock.release()
82 changes: 49 additions & 33 deletions tests/test_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,14 @@
from optimum_benchmark.backends.transformers_utils import (
extract_transformers_shapes_from_artifacts,
get_transformers_pretrained_config,
get_transformers_pretrained_processor,
)
from optimum_benchmark.benchmarks.inference.config import INPUT_SHAPES, InferenceConfig
from optimum_benchmark.benchmarks.training.config import DATASET_SHAPES
from optimum_benchmark.experiment import ExperimentConfig, launch
from optimum_benchmark.generators.dataset_generator import DatasetGenerator
from optimum_benchmark.generators.input_generator import InputGenerator
from optimum_benchmark.import_utils import get_git_revision_hash
from optimum_benchmark.launchers.inline.config import InlineConfig
from optimum_benchmark.launchers.process.config import ProcessConfig
from optimum_benchmark.launchers.torchrun.config import TorchrunConfig
Expand All @@ -28,7 +30,6 @@

LIBRARIES_TASKS_MODELS = [
("transformers", "fill-mask", "bert-base-uncased"),
("timm", "image-classification", "timm/resnet50.a1_in1k"),
("transformers", "text-generation", "openai-community/gpt2"),
("transformers", "text2text-generation", "google-t5/t5-small"),
("transformers", "multiple-choice", "FacebookAI/roberta-base"),
Expand All @@ -37,6 +38,8 @@
("transformers", "token-classification", "microsoft/deberta-v3-base"),
("transformers", "image-classification", "google/vit-base-patch16-224"),
("transformers", "semantic-segmentation", "google/vit-base-patch16-224"),
("diffusers", "stable-diffusion", "CompVis/stable-diffusion-v1-4"),
("timm", "image-classification", "timm/resnet50.a1_in1k"),
]
LAUNCHER_CONFIGS = [
InlineConfig(device_isolation=False),
Expand Down Expand Up @@ -107,17 +110,53 @@ def test_api_memory_tracker(device, backend):
gc.collect()


@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("launcher_config", LAUNCHER_CONFIGS)
def test_api_launch(device, launcher_config):
    """End-to-end smoke test: run an inference benchmark through every
    launcher and verify that both the experiment config and the resulting
    report can be exported in every supported format."""
    backend_config = PyTorchConfig(
        model="bert-base-uncased",
        device=device,
        # two devices only makes sense for the CUDA path
        device_ids="0,1" if device == "cuda" else None,
        no_weights=True,
    )
    experiment_config = ExperimentConfig(
        experiment_name="api-experiment",
        benchmark=InferenceConfig(latency=True, memory=True),
        launcher=launcher_config,
        backend=backend_config,
    )

    benchmark_report = launch(experiment_config)

    with TemporaryDirectory() as tempdir:
        # both artifacts share the same serialization surface; exercise it all
        for artifact, stem in (
            (experiment_config, "experiment_config"),
            (benchmark_report, "benchmark_report"),
        ):
            artifact.to_dict()
            artifact.to_flat_dict()
            artifact.to_dataframe()
            artifact.to_csv(f"{tempdir}/{stem}.csv")
            artifact.to_json(f"{tempdir}/{stem}.json")


@pytest.mark.parametrize("library,task,model", LIBRARIES_TASKS_MODELS)
def test_api_input_generator(library, task, model):
if library == "transformers":
model_config = get_transformers_pretrained_config(model)
model_shapes = extract_transformers_shapes_from_artifacts(model_config)
model_processor = get_transformers_pretrained_processor(model)
model_shapes = extract_transformers_shapes_from_artifacts(model_config, model_processor)
elif library == "timm":
model_config = get_timm_pretrained_config(model)
model_shapes = extract_timm_shapes_from_config(model_config)
elif library == "diffusers":
model_config = get_diffusers_pretrained_config(model)
model_shapes = extract_diffusers_shapes_from_model(model)
else:
raise ValueError(f"Unknown library {library}")

input_generator = InputGenerator(task=task, input_shapes=INPUT_SHAPES, model_shapes=model_shapes)
generated_inputs = input_generator()
Expand All @@ -136,42 +175,19 @@ def test_api_dataset_generator(library, task, model):
elif library == "timm":
model_config = get_timm_pretrained_config(model)
model_shapes = extract_timm_shapes_from_config(config=model_config)
elif library == "diffusers":
model_config = get_diffusers_pretrained_config(model)
model_shapes = extract_diffusers_shapes_from_model(model)
else:
raise ValueError(f"Unknown library {library}")

generator = DatasetGenerator(task=task, dataset_shapes=DATASET_SHAPES, model_shapes=model_shapes)
generated_dataset = generator()

_ = generator()
assert len(generated_dataset) > 0, "No dataset was generated"

len(generated_dataset) == DATASET_SHAPES["dataset_size"]

@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("launcher_config", LAUNCHER_CONFIGS)
def test_api_launch(device, launcher_config):
benchmark_config = InferenceConfig(latency=True, memory=True)
backend_config = PyTorchConfig(
model="bert-base-uncased",
device_ids="0,1" if device == "cuda" else None,
no_weights=True,
device=device,
)
experiment_config = ExperimentConfig(
experiment_name="api-experiment",
benchmark=benchmark_config,
launcher=launcher_config,
backend=backend_config,
)

benchmark_report = launch(experiment_config)

with TemporaryDirectory() as tempdir:
experiment_config.to_dict()
experiment_config.to_flat_dict()
experiment_config.to_dataframe()
experiment_config.to_csv(f"{tempdir}/experiment_config.csv")
experiment_config.to_json(f"{tempdir}/experiment_config.json")

benchmark_report.to_dict()
benchmark_report.to_flat_dict()
benchmark_report.to_dataframe()
benchmark_report.to_csv(f"{tempdir}/benchmark_report.csv")
benchmark_report.to_json(f"{tempdir}/benchmark_report.json")
def test_git_revision_hash_detection():
    """The test suite runs from a git checkout of the package, so a
    revision hash should be resolvable for it."""
    revision = get_git_revision_hash("optimum_benchmark")
    assert revision is not None

0 comments on commit 580251a

Please sign in to comment.