Update to new evaluation in benchmark
Gistbatch committed Mar 26, 2024
1 parent 8f6565b commit 54bd777
Showing 1 changed file with 20 additions and 151 deletions.
171 changes: 20 additions & 151 deletions data/benchmark/heuristic_benchmark.py
@@ -1,30 +1,25 @@
 """Generates the benchmark data."""
 
 from uuid import uuid4
 import logging
 
 from mqt.bench import get_benchmark
 from qiskit import QuantumCircuit
 import numpy as np
 
-from src.common import jobs_from_experiment, UserCircuit
+from src.common import UserCircuit
 from src.provider import Accelerator
 from src.scheduling import (
     Benchmark,
-    InfoProblem,
-    PTimes,
     Result,
-    SchedulerType,
-    STimes,
-    generate_schedule,
 )
 from src.scheduling.heuristics import (
     generate_heuristic_info_schedule as heuristic_schedule,
 )
 from src.scheduling.learning import generate_rl_schedule
 
 from src.tools import cut_circuit
 from src.utils.helpers import Timer
 
 from .evaluate_baseline import evaluate_baseline

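The code below times each scheduling run with src.utils.helpers.Timer, used as a context manager that exposes an elapsed attribute once the block exits. A minimal sketch of such a helper, assuming a perf-counter implementation (the project's real class may differ):

import time


class Timer:
    """Context manager that records wall-clock duration in `elapsed`."""

    def __enter__(self) -> "Timer":
        self._start = time.perf_counter()
        return self

    def __exit__(self, *exc) -> bool:
        self.elapsed = time.perf_counter() - self._start
        return False  # never swallow exceptions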

@@ -37,10 +32,11 @@ def _generate_batch(
         circuit.remove_final_measurements(inplace=True)
         user_circuit = UserCircuit(
             circuit,
-            size,
             1024,
             np.random.randint(1, 10),
             str(accelerators[np.random.randint(len(accelerators))].uuid),
+            np.random.randint(1, 3),
+            uuid4(),
         )
         batch.append(user_circuit)

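For orientation, here is a rough mirror of the UserCircuit record implied by the constructor call above; every field name is a guess inferred from the argument values, and the authoritative definition lives in src.common. The update drops the explicit size argument and adds what look like a strictness level and a per-circuit uuid:

from dataclasses import dataclass
from uuid import UUID

from qiskit import QuantumCircuit


@dataclass
class UserCircuit:  # hypothetical mirror of src.common.UserCircuit
    circuit: QuantumCircuit
    n_shots: int              # 1024 in the benchmark
    priority: int             # np.random.randint(1, 10) -> 1..9
    machine_preference: str   # uuid of a randomly chosen accelerator
    strictness: int           # np.random.randint(1, 3) -> 1..2
    uuid: UUID                # uuid4() per circuit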
@@ -50,7 +46,6 @@
 def run_heuristic_experiments(
     circuits_per_batch: int,
     settings: list[list[Accelerator]],
-    t_max: int,
     num_batches: int,
 ) -> Benchmark:
     """Generates the benchmarks and executes scheduling."""
@@ -69,46 +64,30 @@ def run_heuristic_experiments(
         logging.info("Running benchmark for setting.")
         # Run the baseline model
         with Timer() as t0:
-            circuits = [circuit.circuit for circuit in benchmark]
-            problem_circuits = _cut_circuits(circuits, setting)
-            logging.info("Setting up times...")
-
-            p_times = _get_benchmark_processing_times(problem_circuits, setting)
-            s_times = _get_benchmark_setup_times(
-                problem_circuits,
-                setting,
-                default_value=2**5,
-            )
-            logging.info("Setting up problems...")
-            problem = InfoProblem(
-                base_jobs=problem_circuits,
-                accelerators={str(acc.uuid): acc.qubits for acc in setting},
-                big_m=1000,
-                timesteps=t_max,
-                process_times=p_times,
-                setup_times=s_times,
-            )
-            makespan, jobs, _ = generate_schedule(problem, SchedulerType.BASELINE)
-        result["baseline"] = Result(makespan, jobs, t0.elapsed)
-        logging.info("Baseline model done: Makespan: %d.", makespan)
+            baseline, jobs = evaluate_baseline(benchmark, setting)
+        result["baseline"] = Result(
+            baseline[0], jobs, t0.elapsed, baseline[1], baseline[2]
+        )
+        logging.info("Baseline model done: Makespan: %d.", baseline[0])
         # Run the reinforcement learning model
-
-        with Timer() as t1:
-            makespan, jobs = generate_rl_schedule(benchmark, setting)
-        result["RL"] = Result(makespan, jobs, t1.elapsed)
-        logging.info("RL model done: Makespan: %d.", makespan)
+        # with Timer() as t1:
+        #     makespan, jobs = generate_rl_schedule(benchmark, setting)
+        # result["RL"] = Result(makespan, jobs, t1.elapsed)
+        # logging.info("RL model done: Makespan: %d.", makespan)
         # Run the heuristic model
         with Timer() as t2:
             # TODO convert ScheduledJob to JobResultInfo
-            makespan, jobs = heuristic_schedule(
+            heuristic, jobs = heuristic_schedule(
                 benchmark,
                 setting,
                 num_iterations=128,
                 partition_size=4,
                 num_cores=16,
             )
-        result["heuristic"] = Result(makespan, jobs, t2.elapsed)
-        logging.info("Heuristic model done: Makespan: %d.", makespan)
+        result["heuristic"] = Result(
+            heuristic[0], jobs, t2.elapsed, heuristic[1], heuristic[2]
+        )
+        logging.info("Heuristic model done: Makespan: %d.", heuristic[0])
         # Store results
         benchmark_results.append(result)
     if len(benchmark_results) > 0:
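Although the new evaluation itself is not part of this diff, the call sites pin down its shape: evaluate_baseline, like heuristic_schedule, returns a metrics tuple plus the scheduled jobs, with position 0 the makespan and positions 1 and 2 two further figures that now travel in Result. A sketch of the widened record, with hypothetical names for the new fields:

from typing import Any, NamedTuple


class Result(NamedTuple):  # sketch; the real class lives in src.scheduling
    makespan: float
    jobs: list[Any]
    time: float           # Timer.elapsed
    metric_1: Any = None  # evaluator tuple position 1; semantics not shown here
    metric_2: Any = None  # evaluator tuple position 2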
@@ -119,113 +98,3 @@ def run_heuristic_experiments(
             }
         )
     return results


-def _cut_circuits(
-    circuits: list[QuantumCircuit], accelerators: list[Accelerator]
-) -> list[QuantumCircuit]:
-    """Cuts the circuits into smaller circuits."""
-    partitions = _generate_partitions(
-        [circuit.num_qubits for circuit in circuits], accelerators
-    )
-    logging.debug(
-        "Partitions: generated: %s",
-        " ".join(str(partition) for partition in partitions),
-    )
-    jobs = []
-    logging.debug("Cutting circuits...")
-    for idx, circuit in enumerate(circuits):
-        logging.debug("Cutting circuit %d", idx)
-        if len(partitions[idx]) > 1:
-            experiments, _ = cut_circuit(circuit, partitions[idx])
-            jobs += [
-                job.circuit
-                for experiment in experiments
-                for job in jobs_from_experiment(experiment)
-            ]
-        else:
-            # assumption for now: don't cut circuits that already fit on one device
-            jobs.append(circuit)
-    return jobs


-def _generate_partitions(
-    circuit_sizes: list[int], accelerators: list[Accelerator]
-) -> list[list[int]]:
-    partitions = []
-    qpu_sizes = [acc.qubits for acc in accelerators]
-    num_qubits: int = sum(qpu_sizes)
-    for circuit_size in circuit_sizes:
-        if circuit_size > num_qubits:
-            partition = qpu_sizes
-            remaining_size = circuit_size - num_qubits
-            while remaining_size > num_qubits:
-                partition += qpu_sizes
-                remaining_size -= num_qubits
-            if remaining_size == 1:
-                partition[-1] = partition[-1] - 1
-                partition.append(2)
-            else:
-                partition += _partition_big_to_small(remaining_size, accelerators)
-            partitions.append(partition)
-        elif circuit_size > max(qpu_sizes):
-            partition = _partition_big_to_small(circuit_size, accelerators)
-            partitions.append(partition)
-        else:
-            partitions.append([circuit_size])
-    return partitions
-
-
-def _partition_big_to_small(size: int, accelerators: list[Accelerator]) -> list[int]:
-    partition = []
-    for qpu in sorted(accelerators, key=lambda a: a.qubits, reverse=True):
-        take_qubits = min(size, qpu.qubits)
-        if size - take_qubits == 1:
-            # We can't have a partition of size 1
-            # So in this case we take one qubit less to leave a partition of two
-            take_qubits -= 1
-        partition.append(take_qubits)
-        size -= take_qubits
-        if size == 0:
-            break
-    else:
-        raise ValueError(
-            "Circuit is too big to fit onto the devices,"
-            + f" {size} qubits left after partitioning."
-        )
-    return partition
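The two deleted helpers implement a greedy big-to-small partitioning scheme. A self-contained sketch of the same idea, simplified to plain integers for the QPU sizes instead of Accelerator objects, with a worked trace:

def partition_big_to_small(size: int, qpu_sizes: list[int]) -> list[int]:
    """Greedily fill the largest QPUs first, never leaving a 1-qubit rest."""
    partition: list[int] = []
    for qubits in sorted(qpu_sizes, reverse=True):
        take = min(size, qubits)
        if size - take == 1:
            take -= 1  # leave a partition of two instead of one
        partition.append(take)
        size -= take
        if size == 0:
            return partition
    raise ValueError(f"Circuit too big, {size} qubits left after partitioning.")


# Two hypothetical QPUs with 7 and 5 qubits:
print(partition_big_to_small(9, [7, 5]))   # [7, 2]
print(partition_big_to_small(12, [7, 5]))  # [7, 5]
# A 25-qubit circuit exceeds the 12-qubit total, so _generate_partitions books
# whole rounds of all QPUs while the remainder stays above 12 and then patches
# the trailing 1-qubit rest, yielding [7, 5, 7, 4, 2] for 25 qubits.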


-def _get_benchmark_processing_times(
-    base_jobs: list[QuantumCircuit],
-    accelerators: list[Accelerator],
-) -> PTimes:
-    return [
-        [accelerator.compute_processing_time(job) for accelerator in accelerators]
-        for job in base_jobs
-    ]
-
-
-def _get_benchmark_setup_times(
-    base_jobs: list[QuantumCircuit],
-    accelerators: list[Accelerator],
-    default_value: float,
-) -> STimes:
-    return [
-        [
-            [
-                (
-                    default_value
-                    if id_i in [id_j, 0]
-                    else (
-                        0
-                        if job_j is None
-                        else accelerator.compute_setup_time(job_i, job_j)
-                    )
-                )
-                for accelerator in accelerators
-            ]
-            for id_i, job_i in enumerate([None] + base_jobs)
-        ]
-        for id_j, job_j in enumerate([None] + base_jobs)
-    ]
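The deleted _get_benchmark_setup_times packs three loops into one comprehension. An equivalent explicit-loop sketch (with a stubbed compute_setup_time so it runs standalone) makes the indexing easier to follow: entry [j][i][k] is the setup time on accelerator k when job i runs after job j, and slot 0 stands for "no job":

from typing import Any


class StubAccelerator:  # stand-in for src.provider.Accelerator
    def compute_setup_time(self, job_i: Any, job_j: Any) -> float:
        return 1.0  # hypothetical constant reconfiguration cost


def setup_times(jobs: list, accelerators: list, default_value: float) -> list:
    slots = [None] + jobs  # index 0 = empty slot
    table = []
    for id_j, job_j in enumerate(slots):      # preceding job
        row = []
        for id_i, job_i in enumerate(slots):  # following job
            cell = []
            for acc in accelerators:
                if id_i in (id_j, 0):
                    cell.append(default_value)  # same slot, or successor is empty
                elif job_j is None:
                    cell.append(0)  # nothing ran before: no reconfiguration
                else:
                    cell.append(acc.compute_setup_time(job_i, job_j))
            row.append(cell)
        table.append(row)
    return table


# 2 jobs x 1 accelerator -> a 3x3 nested table over (None, "job_a", "job_b")
print(setup_times(["job_a", "job_b"], [StubAccelerator()], default_value=2**5))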
