diff --git a/HISTORY.rst b/HISTORY.rst
index 26c84e1..c55a904 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -2,6 +2,11 @@
 History
 =======
 
+3.0.3 (2023-10-12)
+------------------
+* Relaxing version requirements for scipy and pandas to allow versions 2.x
+
+
 3.0.2 (2023-08-08)
 ------------------
 * Added docstring to Experiment
diff --git a/README.md b/README.md
index b729278..3a54a49 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 Spotify Confidence
 ========
 ![Status](https://img.shields.io/badge/Status-Beta-blue.svg)
-![Latest release](https://img.shields.io/badge/release-3.0.2-green.svg "Latest release: 3.0.2")
+![Latest release](https://img.shields.io/badge/release-3.0.3-green.svg "Latest release: 3.0.3")
 ![Python](https://img.shields.io/badge/Python-3.7-blue.svg "Python")
 ![Python](https://img.shields.io/badge/Python-3.8-blue.svg "Python")
 ![Python](https://img.shields.io/badge/Python-3.9-blue.svg "Python")
diff --git a/setup.cfg b/setup.cfg
index 559cc42..ee31e27 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = spotify-confidence
-version = 3.0.2
+version = 3.0.3
 author = Per Sillren
 author_email = pers@spotify.com
 description = Package for calculating and visualising confidence intervals, e.g. for A/B test analysis.
@@ -21,8 +21,8 @@ packages = find:
 python_requires = >=3.8
 install_requires =
     numpy>=1.20.0,<2.0.0
-    scipy>=1.6.0,<1.8.0
-    pandas>=1.2.0,<2.0.0
+    scipy>=1.6.0
+    pandas>=1.2.0
     statsmodels>=0.13.0,<1.0.0
     chartify>=4.0.3
     ipywidgets>=8.0.0
diff --git a/spotify_confidence/analysis/frequentist/confidence_computers/z_test_computer.py b/spotify_confidence/analysis/frequentist/confidence_computers/z_test_computer.py
index bbe923f..cbb0319 100644
--- a/spotify_confidence/analysis/frequentist/confidence_computers/z_test_computer.py
+++ b/spotify_confidence/analysis/frequentist/confidence_computers/z_test_computer.py
@@ -4,7 +4,11 @@
 from pandas import DataFrame, Series
 from scipy import optimize
 from scipy import stats as st
-from scipy.stats.stats import _unequal_var_ttest_denom
+
+try:
+    from scipy.stats._stats_py import _unequal_var_ttest_denom
+except ImportError:  # Fallback for scipy<1.8.0
+    from scipy.stats.stats import _unequal_var_ttest_denom
 from statsmodels.stats.weightstats import _zconfint_generic, _zstat_generic
 
 
diff --git a/tests/frequentist/test_freqsamplesizecalculator.py b/tests/frequentist/test_freqsamplesizecalculator.py
index be77024..1eaf148 100644
--- a/tests/frequentist/test_freqsamplesizecalculator.py
+++ b/tests/frequentist/test_freqsamplesizecalculator.py
@@ -819,6 +819,6 @@ def test_sample_size_with_nan(self):
         )
 
         assert len(ss) == len(df)
-        assert ss[REQUIRED_SAMPLE_SIZE_METRIC].values[0] is None
+        assert ss[REQUIRED_SAMPLE_SIZE_METRIC].isna()[0]
         assert 0.999 < ss[REQUIRED_SAMPLE_SIZE_METRIC].values[1] / 95459 < 1.001
         assert ss[CI_WIDTH].isna().all()
diff --git a/tests/frequentist/test_ztest.py b/tests/frequentist/test_ztest.py
index 7c3a16f..1f80f16 100644
--- a/tests/frequentist/test_ztest.py
+++ b/tests/frequentist/test_ztest.py
@@ -203,8 +203,8 @@ def test_powered_effect(self):
         assert np.isclose(powered_effect[POWERED_EFFECT][1], 0.5291, atol=0.001)
         assert np.isclose(powered_effect[POWERED_EFFECT][2], 0.4596, atol=0.001)
         assert np.isclose(powered_effect[POWERED_EFFECT][3], 0.4869, atol=0.001)
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][0] is None
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][1] is None
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[0]
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[1]
         assert np.isclose(powered_effect[REQUIRED_SAMPLE_SIZE][2], 16487886, atol=100)
         assert np.isclose(powered_effect[REQUIRED_SAMPLE_SIZE][3], 3083846, atol=100)
 
@@ -329,10 +329,10 @@ def test_powered_effect(self):
         assert np.isclose(powered_effect[POWERED_EFFECT][6], 0.4995, atol=0.001)
         assert np.isclose(powered_effect[POWERED_EFFECT][7], 0.5291, atol=0.001)
 
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][0] is None
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][1] is None
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][2] is None
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][3] is None
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[0]
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[1]
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[2]
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[3]
         assert np.isclose(powered_effect[REQUIRED_SAMPLE_SIZE][4], 19475238, atol=100)
         assert np.isclose(powered_effect[REQUIRED_SAMPLE_SIZE][5], 3642591, atol=100)
         assert np.isclose(powered_effect[REQUIRED_SAMPLE_SIZE][6], 19475238, atol=100)
@@ -733,12 +733,12 @@ def test_powered_effect(self):
         # assert np.isclose(powered_effect[POWERED_EFFECT][10], 0.2663, atol=0.001)
         # assert np.isclose(powered_effect[POWERED_EFFECT][11], 0.2479, atol=0.001)
 
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][0] is None
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][1] is None
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][2] is None
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][3] is None
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][4] is None
-        assert powered_effect[REQUIRED_SAMPLE_SIZE][5] is None
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[0]
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[1]
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[2]
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[3]
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[4]
+        assert powered_effect[REQUIRED_SAMPLE_SIZE].isna()[5]
         assert np.isclose(powered_effect[REQUIRED_SAMPLE_SIZE][6], 260541, atol=100)
         assert np.isclose(powered_effect[REQUIRED_SAMPLE_SIZE][7], 361863, atol=100)
         assert np.isclose(powered_effect[REQUIRED_SAMPLE_SIZE][8], 326159, atol=100)
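Note on the assertion changes (illustrative sketch, not taken from the package's code path): pandas stores missing values in numeric columns as NaN rather than None, so an identity check such as values[0] is None only holds for object-dtype data, while Series.isna() flags missingness regardless of dtype. The Series below is hypothetical, using a default integer index like the test DataFrames. The try/except import above mirrors scipy's move of its private stats module to scipy.stats._stats_py in version 1.8.0.

    import numpy as np
    import pandas as pd

    # Hypothetical data: one missing entry, one required sample size.
    s_object = pd.Series([None, 95459], dtype=object)  # object dtype keeps None as-is
    s_float = pd.Series([np.nan, 95459.0])             # float dtype stores the gap as NaN

    assert s_object.values[0] is None       # identity check only works for object dtype
    assert s_float.values[0] is not None    # NaN is a float, not None
    assert s_object.isna()[0] and s_float.isna()[0]  # isna() detects both representations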