diff --git a/.zenodo.json b/.zenodo.json index e0508e9..ff7d2f0 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -1,8 +1,8 @@ { "description": "smooth inference for reinterpretation studies", "license": "MIT", - "title": "SpeysideHEP/spey: v0.1.10", - "version": "v0.1.10", + "title": "SpeysideHEP/spey: v0.1.11", + "version": "v0.1.11", "upload_type": "software", "creators": [ { @@ -31,7 +31,7 @@ }, { "scheme": "url", - "identifier": "https://github.com/SpeysideHEP/spey/tree/v0.1.10", + "identifier": "https://github.com/SpeysideHEP/spey/tree/v0.1.11", "relation": "isSupplementTo" }, { diff --git a/docs/releases/changelog-v0.1.md b/docs/releases/changelog-v0.1.md index 9e0a563..964fa85 100644 --- a/docs/releases/changelog-v0.1.md +++ b/docs/releases/changelog-v0.1.md @@ -82,6 +82,9 @@ Specific upgrades for the latest release can be found [here](https://github.com/ * p-value computation in $\chi^2$ and toy calculators have been fixed. ([#42](https://github.com/SpeysideHEP/spey/pull/42)) +* Bugfix in uncorrelated statistics combiner. This fixes a crash while computing $\sigma_\mu$ through the inverse of the Hessian matrix. 
+ ([#43](https://github.com/SpeysideHEP/spey/pull/43)) + ## Contributors This release contains contributions from (in alphabetical order): diff --git a/src/spey/_version.py b/src/spey/_version.py index 7c416db..c0a70a3 100644 --- a/src/spey/_version.py +++ b/src/spey/_version.py @@ -1,3 +1,3 @@ """Version number (major.minor.patch[-label])""" -__version__ = "0.1.10" +__version__ = "0.1.11-beta" diff --git a/src/spey/backends/default_pdf/third_moment.py b/src/spey/backends/default_pdf/third_moment.py index 87e54f1..cb8179c 100644 --- a/src/spey/backends/default_pdf/third_moment.py +++ b/src/spey/backends/default_pdf/third_moment.py @@ -56,10 +56,9 @@ def third_moment_expansion( ) ) if len(w) > 0: - warnings.warn( + log.warning( "8 * diag(cov)**3 >= third_moment**2 condition is not satisfied," - " setting nan values to zero.", - category=RuntimeWarning, + " setting nan values to zero." ) C = np.where(np.isnan(C), 0.0, C) log.debug(f"C: {C}") diff --git a/src/spey/combiner/uncorrelated_statistics_combiner.py b/src/spey/combiner/uncorrelated_statistics_combiner.py index 00c0d48..9056728 100644 --- a/src/spey/combiner/uncorrelated_statistics_combiner.py +++ b/src/spey/combiner/uncorrelated_statistics_combiner.py @@ -3,6 +3,7 @@ of different statistical models for hypothesis testing """ +import logging import warnings from typing import Dict, Iterator, List, Optional, Text, Tuple, Union @@ -17,6 +18,8 @@ __all__ = ["UnCorrStatisticsCombiner"] +log = logging.getLogger("Spey") + class UnCorrStatisticsCombiner(HypothesisTestingBase): """ @@ -494,27 +497,31 @@ def maximize_likelihood( # muhat initial value estimation in gaussian limit mu_init = initial_muhat_value or 0.0 if initial_muhat_value is None: - _mu, _sigma_mu = np.zeros(len(self)), np.ones(len(self)) - for idx, stat_model in enumerate(self): + try: + _mu, _sigma_mu = np.zeros(len(self)), np.ones(len(self)) + for idx, stat_model in enumerate(self): - current_kwargs = {} - current_kwargs.update( - 
statistical_model_options.get(str(stat_model.backend_type), {}) - ) + current_kwargs = {} + current_kwargs.update( + statistical_model_options.get(str(stat_model.backend_type), {}) + ) - _mu[idx] = stat_model.maximize_likelihood( - expected=expected, **current_kwargs, **optimiser_options - )[0] - _sigma_mu[idx] = stat_model.sigma_mu( - poi_test=_mu[idx], - expected=expected, - **current_kwargs, - **optimiser_options, + _mu[idx] = stat_model.maximize_likelihood( + expected=expected, **current_kwargs, **optimiser_options + )[0] + _sigma_mu[idx] = stat_model.sigma_mu( + poi_test=_mu[idx], + expected=expected, + **current_kwargs, + **optimiser_options, + ) + norm = np.sum(np.power(_sigma_mu, -2)) + mu_init = np.true_divide(1.0, norm) * np.sum( + np.true_divide(_mu, np.square(_sigma_mu)) ) - norm = np.sum(np.power(_sigma_mu, -2)) - mu_init = np.true_divide(1.0, norm) * np.sum( - np.true_divide(_mu, np.square(_sigma_mu)) - ) + except Exception as err: + log.debug(str(err)) + mu_init = 0.0 config: ModelConfig = ModelConfig( poi_index=0, @@ -651,3 +658,16 @@ def twice_nll(poi_test: Union[float, np.ndarray]) -> float: ) return fit_params[0], twice_nll / 2.0 if return_nll else np.exp(-twice_nll / 2.0) + + def __matmul__(self, other: StatisticalModel): + new_model = UnCorrStatisticsCombiner(*self._statistical_models) + if isinstance(other, StatisticalModel): + new_model.append(other) + elif isinstance(other, UnCorrStatisticsCombiner): + for model in other: + new_model.append(model) + else: + raise ValueError( + f"Can not combine type<{type(other)}> with UnCorrStatisticsCombiner" + ) + return new_model diff --git a/tests/test_combiner.py b/tests/test_combiner.py new file mode 100644 index 0000000..19a661c --- /dev/null +++ b/tests/test_combiner.py @@ -0,0 +1,55 @@ +"""Test uncorrelated statistics combiner""" + +import numpy as np + +import spey +from spey.combiner.uncorrelated_statistics_combiner import UnCorrStatisticsCombiner + + +def test_combiner(): + """Test uncorrelated 
statistics combiner""" + + normal = spey.get_backend("default_pdf.normal")( + signal_yields=[1, 1, 1], + background_yields=[2, 1, 3], + data=[2, 2, 2], + absolute_uncertainties=[1, 1, 1], + ) + normal_cls = normal.exclusion_confidence_level()[0] + normal = spey.get_backend("default_pdf.multivariate_normal")( + signal_yields=[1, 1, 1], + background_yields=[2, 1, 3], + data=[2, 2, 2], + covariance_matrix=np.diag([1, 1, 1]), + ) + multivar_norm_cls = normal.exclusion_confidence_level()[0] + + assert np.isclose( + normal_cls, multivar_norm_cls + ), "Normal CLs is not the same as Multivariant normal CLs" + + normal1 = spey.get_backend("default_pdf.normal")( + signal_yields=[1], + background_yields=[3], + data=[2], + absolute_uncertainties=[1], + analysis="norm1", + ) + normal2 = spey.get_backend("default_pdf.normal")( + signal_yields=[1], + background_yields=[1], + data=[2], + absolute_uncertainties=[1], + analysis="norm2", + ) + normal3 = spey.get_backend("default_pdf.normal")( + signal_yields=[1], + background_yields=[2], + data=[2], + absolute_uncertainties=[1], + analysis="norm3", + ) + combined = UnCorrStatisticsCombiner(normal1, normal2, normal3) + combined_cls = combined.exclusion_confidence_level()[0] + + assert np.isclose(multivar_norm_cls, combined_cls), "Combined CLs is wrong" diff --git a/tests/test_simplifiedllhd_limit.py b/tests/test_simplifiedllhd_limit.py new file mode 100644 index 0000000..e911cbc --- /dev/null +++ b/tests/test_simplifiedllhd_limit.py @@ -0,0 +1,56 @@ +"""Test simplified models at the limit""" + +import numpy as np + +import spey +from spey.helper_functions import covariance_to_correlation + + +def test_simplified_llhds_at_limit(): + """Test models at the limit""" + + base_model = spey.get_backend("default_pdf.uncorrelated_background")( + signal_yields=[1, 1, 1], + background_yields=[2, 1, 3], + data=[2, 2, 2], + absolute_uncertainties=[1, 1, 1], + ) + base_model_cls = base_model.exclusion_confidence_level()[0] + + correlated_model = 
spey.get_backend("default_pdf.correlated_background")( + signal_yields=[1, 1, 1], + background_yields=[2, 1, 3], + data=[2, 2, 2], + covariance_matrix=np.diag([1, 1, 1]), + ) + correlated_model_cls = correlated_model.exclusion_confidence_level()[0] + + assert np.isclose( + correlated_model_cls, base_model_cls + ), "Correlated model is not same as base model" + + third_moment_model = spey.get_backend("default_pdf.third_moment_expansion")( + signal_yields=[1, 1, 1], + background_yields=[2, 1, 3], + data=[2, 2, 2], + covariance_matrix=np.diag([1, 1, 1]), + third_moment=[0.0, 0.0, 0.0], + ) + third_moment_model_cls = third_moment_model.exclusion_confidence_level()[0] + + assert np.isclose( + third_moment_model_cls, base_model_cls + ), "third moment model is not same as base model" + + eff_sigma_model = spey.get_backend("default_pdf.effective_sigma")( + signal_yields=[1, 1, 1], + background_yields=[2, 1, 3], + data=[2, 2, 2], + correlation_matrix=covariance_to_correlation(np.diag([1, 1, 1])), + absolute_uncertainty_envelops=[(1.0, 1.0), (1.0, 1.0), (1.0, 1.0)], + ) + eff_sigma_model_cls = eff_sigma_model.exclusion_confidence_level()[0] + + assert np.isclose( + eff_sigma_model_cls, base_model_cls + ), "effective sigma model is not same as base model"