
Commit 094cb3e

Authored by renovate[bot], xyzou685, and vjohnson1-godaddy
fix(deps): update dependency statsmodels to ^0.14.0 (#93)
* fix(deps): update dependency statsmodels to ^0.14.0
* Update poetry.lock
* specify statsmodels version
* fix tests
* remove 3.7 from ci tests

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Xinyu Zou <[email protected]>
Co-authored-by: vjohnson1-godaddy <[email protected]>
1 parent e99df63 commit 094cb3e

File tree

6 files changed: +576 -465 lines changed


.github/workflows/ci.yml

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, windows-latest, macos-latest]
-        python-version: ['3.7', '3.8', '3.9', '3.10']
+        python-version: ['3.8', '3.9', '3.10']
         include:
           - os: ubuntu-latest
           - os: windows-latest

poetry.lock

Lines changed: 565 additions & 454 deletions
Some generated files are not rendered by default.

pyproject.toml

Lines changed: 2 additions & 2 deletions
@@ -9,8 +9,8 @@ maintainers = ["GoDaddy <[email protected]>"]
 keywords = ["sample size", "experimentation", "power analysis"]

 [tool.poetry.dependencies]
-python = ">=3.7.1,<3.11"
-statsmodels = "^0.13.1"
+python = ">=3.8,<3.11"
+statsmodels = "^0.14.0"
 jsonschema = "^4.5.1"

 [tool.poetry.dev-dependencies]
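One hedged way to double-check a resolved environment against the tightened constraints above (not part of this commit; it assumes the packaging library is installed):

# Hypothetical sanity check, not from this repo: confirm the installed
# statsmodels falls inside the ^0.14.0 range declared above.
import statsmodels
from packaging.version import Version

installed = Version(statsmodels.__version__)
assert Version("0.14.0") <= installed < Version("0.15.0"), f"unexpected statsmodels {installed}"
print(f"statsmodels {installed} satisfies ^0.14.0")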

sample_size/metrics.py

Lines changed: 3 additions & 3 deletions
@@ -104,7 +104,7 @@ def _generate_alt_p_values(
        z_alt = stats.norm.rvs(loc=effect_size, size=size, random_state=random_state)
        p_values: npt.NDArray[np.float_] = stats.norm.sf(np.abs(z_alt))
        if self.alternative == "two-sided":
-            p_values *= 2
+            return 2 * p_values
        return p_values


@@ -136,7 +136,7 @@ def _generate_alt_p_values(
        p_values: npt.NDArray[np.float_] = stats.t.sf(np.abs(t_alt), 2 * (sample_size - 1))
        # Todo: use accurate p-value calculation due to nct's asymmetric distribution
        if self.alternative == "two-sided":
-            p_values *= 2
+            return 2 * p_values
        return p_values


@@ -186,5 +186,5 @@ def _generate_alt_p_values(
        z_alt = stats.norm.rvs(loc=effect_size, size=size, random_state=random_state)
        p_values: npt.NDArray[np.float_] = stats.norm.sf(np.abs(z_alt))
        if self.alternative == "two-sided":
-            p_values *= 2
+            return 2 * p_values
        return p_values
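The intent of these hunks is unchanged: for a two-sided alternative the survival-function value is doubled, p = 2 * P(Z >= |z|); the code now returns a fresh array instead of scaling p_values in place. A standalone sketch of the same calculation (illustrative only, not the package's API):

# Standalone illustration of the two-sided p-value computation above.
import numpy as np
from scipy import stats

effect_size, size = 0.3, 5
z_alt = stats.norm.rvs(loc=effect_size, size=size, random_state=42)
p_one_sided = stats.norm.sf(np.abs(z_alt))   # P(Z >= |z|)
p_two_sided = 2 * p_one_sided                # new array; p_one_sided is left untouched
print(p_two_sided)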

sample_size/multiple_testing.py

Lines changed: 1 addition & 1 deletion
@@ -101,7 +101,7 @@ def fdr_bh(a: npt.NDArray[np.float_]) -> npt.NDArray[np.bool_]:
        for i, m in enumerate(metrics):
            p_values.append(m.generate_p_values(true_alt[i], sample_size, random_state))

-        rejected = np.apply_along_axis(fdr_bh, 0, np.array(p_values))  # type: ignore[no-untyped-call]
+        rejected = np.apply_along_axis(fdr_bh, 0, np.array(p_values))

        true_discoveries = rejected & true_alt
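For context, fdr_bh applies a Benjamini-Hochberg correction down each column of the stacked p-value matrix. A minimal sketch of that pattern built on statsmodels' multipletests (the wrapper below is illustrative, not the repository's fdr_bh):

# Illustrative column-wise Benjamini-Hochberg rejection; this fdr_bh wrapper
# is a stand-in, not the repo's implementation.
import numpy as np
from statsmodels.stats.multitest import multipletests

def fdr_bh(p_column):
    reject, _, _, _ = multipletests(p_column, alpha=0.05, method="fdr_bh")
    return reject

p_values = np.array([[0.001, 0.20, 0.03],
                     [0.040, 0.50, 0.01]])   # rows: metrics, columns: simulations
rejected = np.apply_along_axis(fdr_bh, 0, p_values)
print(rejected)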

tests/sample_size/test_metrics.py

Lines changed: 4 additions & 4 deletions
@@ -124,7 +124,7 @@ def test_boolean_metric_get_probability_too_small(self):
    @patch("scipy.stats.norm")
    def test_boolean__generate_alt_p_values(self, size, sample_size, alternative, mock_norm, mock_variance):
        p_value_generator = mock_norm.sf
-        p_values = MagicMock()
+        p_values = ["🏝️", "🏜️", "🌋"]
        mock_norm.rvs.return_value = -ord("🌮")
        p_value_generator.return_value = p_values
        mock_variance.__get__ = MagicMock(return_value=self.DEFAULT_MOCK_VARIANCE)
@@ -159,7 +159,7 @@ def test_numeric_metric_constructor_sets_params(self):
    @patch("scipy.stats.t")
    def test_numeric__generate_alt_p_values(self, size, sample_size, alternative, mock_t, mock_nct, mock_variance):
        p_value_generator = mock_t.sf
-        p_values = MagicMock()
+        p_values = ["🏝️", "🏜️", "🌋"]
        mock_nct.rvs.return_value = -ord("🌮")
        p_value_generator.return_value = p_values
        mock_variance.__get__ = MagicMock(return_value=self.DEFAULT_VARIANCE)
@@ -226,7 +226,7 @@ def test_ratio_metric_variance(self):
    @patch("scipy.stats.norm")
    def test_ratio__generate_alt_p_values(self, size, sample_size, alternative, mock_norm, mock_variance):
        p_value_generator = mock_norm.sf
-        p_values = MagicMock()
+        p_values = ["🏝️", "🏜️", "🌋"]
        mock_norm.rvs.return_value = -ord("🌮")
        p_value_generator.return_value = p_values
        mock_variance.__get__ = MagicMock(return_value=self.DEFAULT_VARIANCE)
@@ -238,7 +238,7 @@ def test_ratio__generate_alt_p_values(self, size, sample_size, alternative, mock
            self.DEFAULT_DENOMINATOR_VARIANCE,
            self.DEFAULT_COVARIANCE,
            self.DEFAULT_MDE,
-            self.DEFAULT_ALTERNATIVE,
+            alternative,
        )

        p = metric._generate_alt_p_values(size, sample_size, RANDOM_STATE)
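These tests patch scipy.stats.norm (or scipy.stats.t) and route a plain list through sf, so the two-sided branch now returns that sentinel list repeated rather than a mutated mock. A condensed, hypothetical version of the pattern (the function and variable names below are made up for illustration):

# Hypothetical, condensed sketch of the patching pattern used in these tests.
from unittest.mock import patch

def two_sided_p(alternative):
    # Stand-in for the code under test: calls scipy.stats.norm.sf and doubles it.
    from scipy import stats
    p = stats.norm.sf(1.0)
    return 2 * p if alternative == "two-sided" else p

with patch("scipy.stats.norm") as mock_norm:
    mock_norm.sf.return_value = ["🏝️", "🏜️", "🌋"]             # sentinel, as in the diff
    assert two_sided_p("two-sided") == ["🏝️", "🏜️", "🌋"] * 2  # list repetition, not numeric doubling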
