Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bugfix in stat combiner #43

Merged
merged 8 commits into from
Nov 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions .zenodo.json
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
{
"description": "smooth inference for reinterpretation studies",
"license": "MIT",
"title": "SpeysideHEP/spey: v0.1.10",
"version": "v0.1.10",
"title": "SpeysideHEP/spey: v0.1.11",
"version": "v0.1.11",
"upload_type": "software",
"creators": [
{
Expand Down Expand Up @@ -31,7 +31,7 @@
},
{
"scheme": "url",
"identifier": "https://github.com/SpeysideHEP/spey/tree/v0.1.10",
"identifier": "https://github.com/SpeysideHEP/spey/tree/v0.1.11",
"relation": "isSupplementTo"
},
{
Expand Down
3 changes: 3 additions & 0 deletions docs/releases/changelog-v0.1.md
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,9 @@ Specific upgrades for the latest release can be found [here](https://github.com/
* p-value computation in $\chi^2$ and toy calculators has been fixed.
([#42](https://github.com/SpeysideHEP/spey/pull/42))

* Bugfix in uncorrelated statistics combiner: fixes a crash that occurred while computing $\sigma_\mu$ through the inverse of the Hessian matrix.
([#43](https://github.com/SpeysideHEP/spey/pull/43))

## Contributors

This release contains contributions from (in alphabetical order):
Expand Down
2 changes: 1 addition & 1 deletion src/spey/_version.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
"""Version number (major.minor.patch[-label])"""

__version__ = "0.1.10"
__version__ = "0.1.11-beta"
5 changes: 2 additions & 3 deletions src/spey/backends/default_pdf/third_moment.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,10 +56,9 @@ def third_moment_expansion(
)
)
if len(w) > 0:
warnings.warn(
log.warning(
"8 * diag(cov)**3 >= third_moment**2 condition is not satisfied,"
" setting nan values to zero.",
category=RuntimeWarning,
" setting nan values to zero."
)
C = np.where(np.isnan(C), 0.0, C)
log.debug(f"C: {C}")
Expand Down
56 changes: 38 additions & 18 deletions src/spey/combiner/uncorrelated_statistics_combiner.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
of different statistical models for hypothesis testing
"""

import logging
import warnings
from typing import Dict, Iterator, List, Optional, Text, Tuple, Union

Expand All @@ -17,6 +18,8 @@

__all__ = ["UnCorrStatisticsCombiner"]

log = logging.getLogger("Spey")


class UnCorrStatisticsCombiner(HypothesisTestingBase):
"""
Expand Down Expand Up @@ -494,27 +497,31 @@ def maximize_likelihood(
# muhat initial value estimation in gaussian limit
mu_init = initial_muhat_value or 0.0
if initial_muhat_value is None:
_mu, _sigma_mu = np.zeros(len(self)), np.ones(len(self))
for idx, stat_model in enumerate(self):
try:
_mu, _sigma_mu = np.zeros(len(self)), np.ones(len(self))
for idx, stat_model in enumerate(self):

current_kwargs = {}
current_kwargs.update(
statistical_model_options.get(str(stat_model.backend_type), {})
)
current_kwargs = {}
current_kwargs.update(
statistical_model_options.get(str(stat_model.backend_type), {})
)

_mu[idx] = stat_model.maximize_likelihood(
expected=expected, **current_kwargs, **optimiser_options
)[0]
_sigma_mu[idx] = stat_model.sigma_mu(
poi_test=_mu[idx],
expected=expected,
**current_kwargs,
**optimiser_options,
_mu[idx] = stat_model.maximize_likelihood(
expected=expected, **current_kwargs, **optimiser_options
)[0]
_sigma_mu[idx] = stat_model.sigma_mu(
poi_test=_mu[idx],
expected=expected,
**current_kwargs,
**optimiser_options,
)
norm = np.sum(np.power(_sigma_mu, -2))
mu_init = np.true_divide(1.0, norm) * np.sum(
np.true_divide(_mu, np.square(_sigma_mu))
)
norm = np.sum(np.power(_sigma_mu, -2))
mu_init = np.true_divide(1.0, norm) * np.sum(
np.true_divide(_mu, np.square(_sigma_mu))
)
except Exception as err:
log.debug(str(err))
mu_init = 0.0

config: ModelConfig = ModelConfig(
poi_index=0,
Expand Down Expand Up @@ -651,3 +658,16 @@ def twice_nll(poi_test: Union[float, np.ndarray]) -> float:
)

return fit_params[0], twice_nll / 2.0 if return_nll else np.exp(-twice_nll / 2.0)

def __matmul__(self, other: StatisticalModel):
    """
    Combine this combiner with another statistical model (or combiner)
    via the ``@`` operator, returning a new ``UnCorrStatisticsCombiner``.

    The current combiner is left untouched; a copy holding all of its
    models plus the contents of ``other`` is returned.
    """
    # Start from a copy so the operator has no side effects on `self`.
    combined = UnCorrStatisticsCombiner(*self._statistical_models)
    if isinstance(other, StatisticalModel):
        combined.append(other)
        return combined
    if isinstance(other, UnCorrStatisticsCombiner):
        # Merge every model of the other combiner into the copy.
        for stat_model in other:
            combined.append(stat_model)
        return combined
    raise ValueError(
        f"Can not combine type<{type(other)}> with UnCorrStatisticsCombiner"
    )
55 changes: 55 additions & 0 deletions tests/test_combiner.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
"""Test uncorrelated statistics combiner"""

import numpy as np

import spey
from spey.combiner.uncorrelated_statistics_combiner import UnCorrStatisticsCombiner


def test_combiner():
    """Test uncorrelated statistics combiner.

    Verifies two equivalences:
      1. A multi-bin normal model equals a multivariate normal with a
         diagonal (unit) covariance matrix.
      2. Combining three independent single-bin normal models through
         ``UnCorrStatisticsCombiner`` reproduces the multivariate result.
    """

    normal = spey.get_backend("default_pdf.normal")(
        signal_yields=[1, 1, 1],
        background_yields=[2, 1, 3],
        data=[2, 2, 2],
        absolute_uncertainties=[1, 1, 1],
    )
    normal_cls = normal.exclusion_confidence_level()[0]

    # Same yields expressed with a diagonal covariance matrix; use a new
    # name instead of rebinding `normal` so both models stay inspectable.
    multivariate_normal = spey.get_backend("default_pdf.multivariate_normal")(
        signal_yields=[1, 1, 1],
        background_yields=[2, 1, 3],
        data=[2, 2, 2],
        covariance_matrix=np.diag([1, 1, 1]),
    )
    multivar_norm_cls = multivariate_normal.exclusion_confidence_level()[0]

    assert np.isclose(
        normal_cls, multivar_norm_cls
    ), "Normal CLs is not the same as Multivariate normal CLs"

    # Three independent single-bin models carrying the same per-bin yields.
    normal1 = spey.get_backend("default_pdf.normal")(
        signal_yields=[1],
        background_yields=[3],
        data=[2],
        absolute_uncertainties=[1],
        analysis="norm1",
    )
    normal2 = spey.get_backend("default_pdf.normal")(
        signal_yields=[1],
        background_yields=[1],
        data=[2],
        absolute_uncertainties=[1],
        analysis="norm2",
    )
    normal3 = spey.get_backend("default_pdf.normal")(
        signal_yields=[1],
        background_yields=[2],
        data=[2],
        absolute_uncertainties=[1],
        analysis="norm3",
    )
    combined = UnCorrStatisticsCombiner(normal1, normal2, normal3)
    combined_cls = combined.exclusion_confidence_level()[0]

    assert np.isclose(multivar_norm_cls, combined_cls), "Combined CLs is wrong"
56 changes: 56 additions & 0 deletions tests/test_simplifiedllhd_limit.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
"""Test simplified models at the limit"""

import numpy as np

import spey
from spey.helper_functions import covariance_to_correlation


def test_simplified_llhds_at_limit():
    """Test models at the limit.

    With a diagonal unit covariance and vanishing third moments, the
    correlated-background, third-moment-expansion, and effective-sigma
    models must all reduce to the uncorrelated-background baseline, i.e.
    yield the same exclusion CLs.
    """

    # Shared yields for every model under comparison.
    yields = {
        "signal_yields": [1, 1, 1],
        "background_yields": [2, 1, 3],
        "data": [2, 2, 2],
    }

    base_model = spey.get_backend("default_pdf.uncorrelated_background")(
        absolute_uncertainties=[1, 1, 1], **yields
    )
    base_model_cls = base_model.exclusion_confidence_level()[0]

    # Diagonal covariance ⇒ no correlations ⇒ identical to the base model.
    correlated_model = spey.get_backend("default_pdf.correlated_background")(
        covariance_matrix=np.diag([1, 1, 1]), **yields
    )
    correlated_model_cls = correlated_model.exclusion_confidence_level()[0]

    assert np.isclose(
        correlated_model_cls, base_model_cls
    ), "Correlated model is not same as base model"

    # Zero third moments ⇒ expansion collapses to the Gaussian limit.
    third_moment_model = spey.get_backend("default_pdf.third_moment_expansion")(
        covariance_matrix=np.diag([1, 1, 1]),
        third_moment=[0.0, 0.0, 0.0],
        **yields,
    )
    third_moment_model_cls = third_moment_model.exclusion_confidence_level()[0]

    assert np.isclose(
        third_moment_model_cls, base_model_cls
    ), "third moment model is not same as base model"

    # Symmetric uncertainty envelopes ⇒ effective sigma matches the baseline.
    eff_sigma_model = spey.get_backend("default_pdf.effective_sigma")(
        correlation_matrix=covariance_to_correlation(np.diag([1, 1, 1])),
        absolute_uncertainty_envelops=[(1.0, 1.0), (1.0, 1.0), (1.0, 1.0)],
        **yields,
    )
    eff_sigma_model_cls = eff_sigma_model.exclusion_confidence_level()[0]

    assert np.isclose(
        eff_sigma_model_cls, base_model_cls
    ), "effective sigma model is not same as base model"