diff --git a/tests/benchmark-models/benchmark_models.yaml b/tests/benchmark-models/benchmark_models.yaml
index 8550c9ca38..2fed74cf3f 100644
--- a/tests/benchmark-models/benchmark_models.yaml
+++ b/tests/benchmark-models/benchmark_models.yaml
@@ -57,7 +57,7 @@ Elowitz_Nature2000:
   t_adj: 0.11
   note: benchmark collection reference value matches up to sign when applying log10-correction +sum(log(meas*log(10))) / 2
-Fiedler_BMC2016:
+Fiedler_BMCSystBiol2016:
   llh: 58.58390161681
   t_sim: 0.005
   t_fwd: 0.05
diff --git a/tests/benchmark-models/test_petab_benchmark.py b/tests/benchmark-models/test_petab_benchmark.py
index bab18a1550..e3ad23c913 100644
--- a/tests/benchmark-models/test_petab_benchmark.py
+++ b/tests/benchmark-models/test_petab_benchmark.py
@@ -38,11 +38,19 @@ from petab.v1.visualize import plot_problem
 
 
-logger = get_logger(f"amici.{__name__}", logging.WARNING)
+# Enable various debug output
+debug = False
+
+logger = get_logger(
+    f"amici.{__name__}", logging.DEBUG if debug else logging.INFO
+)
 
 script_dir = Path(__file__).parent.absolute()
 repo_root = script_dir.parent.parent
 benchmark_outdir = repo_root / "test_bmc"
+debug_path = script_dir / "debug"
+if debug:
+    debug_path.mkdir(exist_ok=True, parents=True)
 
 
 # reference values for simulation times and log-likelihoods
 references_yaml = script_dir / "benchmark_models.yaml"
@@ -228,12 +236,6 @@ class GradientCheckSettings:
     )
 
 
-debug = False
-if debug:
-    debug_path = Path(__file__).parent / "debug"
-    debug_path.mkdir(exist_ok=True, parents=True)
-
-
 @pytest.fixture(scope="session", params=problems, ids=problems)
 def benchmark_problem(request):
     """Fixture providing model and PEtab problem for a problem from
@@ -632,9 +634,18 @@ def assert_gradient_check_success(
     df["rtol_success"] = df["rel_diff"] <= rtol
     max_adiff = df["abs_diff"].max()
     max_rdiff = df["rel_diff"].max()
-    with pd.option_context("display.max_columns", None, "display.width", None):
+
+    success_fail = "succeeded" if check_result.success else "failed"
+    with pd.option_context(
+        "display.max_columns",
+        None,
+        "display.width",
+        None,
+        "display.max_rows",
+        None,
+    ):
         message = (
-            f"Gradient check failed:\n{df}\n\n"
+            f"Gradient check {success_fail}:\n{df}\n\n"
             f"Maximum absolute difference: {max_adiff} (tolerance: {atol})\n"
             f"Maximum relative difference: {max_rdiff} (tolerance: {rtol})"
         )