
Commit 4377caf

Merge pull request #1621 from CamDavidsonPilon/0.29.0
bump to 0.29.0; bump scipy and pandas dependencies
2 parents fb5ad90 + dd6d70a commit 4377caf

12 files changed, +27 −19 lines


.github/workflows/ci.yaml

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@ jobs:
       fail-fast: true
       matrix:
         os: ["ubuntu-latest"]
-        python-version: ["3.9", "3.10", "3.11"]
+        python-version: ["3.9", "3.10", "3.11", "3.12"]

     steps:
       - name: Checkout source

CHANGELOG.md

Lines changed: 7 additions & 1 deletion
@@ -1,6 +1,12 @@
 ## Changelog

-#### 0.28.0 - 2023-01-03
+#### 0.29.0 - 2024-06-25
+- update dependencies (pandas >= 2.1)
+- update dependencies (scipy >= 1.7)
+
+
+
+#### 0.28.0 - 2024-01-03
 - Fixes bins that are far into the future with using `survival_table_from_events`, see #1587
 - Removed `sklean_adaptor`. It was a terrible hack, and causing more confusion and support debt than I want. This cleans up our API and simplifies the library. ✨ There's no replacement, and I doubt I'll introduce one ✨
 - Fix pandas>=2.0 compatibility.

lifelines/fitters/coxph_fitter.py

Lines changed: 4 additions & 2 deletions
@@ -3223,7 +3223,7 @@ def predict_cumulative_hazard(self, df, times=None, conditional_after=None) -> p

             for stratum, stratified_X in df.groupby(self.strata):
                 log_lambdas_ = anp.array(
-                    [0] + [self.params_[self._strata_labeler(stratum, i)][0] for i in range(2, self.n_breakpoints + 2)]
+                    [0] + [self.params_.loc[self._strata_labeler(stratum, i)].iloc[0] for i in range(2, self.n_breakpoints + 2)]
                 )
                 lambdas_ = np.exp(log_lambdas_)

@@ -3237,7 +3237,9 @@ def predict_cumulative_hazard(self, df, times=None, conditional_after=None) -> p
             return cumulative_hazard

         else:
-            log_lambdas_ = np.array([0] + [self.params_[param][0] for param in self._fitted_parameter_names if param != "beta_"])
+            log_lambdas_ = np.array(
+                [0] + [self.params_.loc[param].iloc[0] for param in self._fitted_parameter_names if param != "beta_"]
+            )
            lambdas_ = np.exp(log_lambdas_)

             Xs = self.regressors.transform_df(df)
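
Both hunks above share one root cause: under pandas >= 2.1, `Series.__getitem__` with an integer key no longer silently falls back to positional lookup, so a trailing `[0]` after a label lookup raises a deprecation warning. A minimal sketch of the before/after pattern, using a hypothetical stand-in for the fitter's `params_` Series:

```python
import pandas as pd

# Hypothetical miniature of a fitter's params_ Series: lifelines
# regression fitters index parameters by (parameter name, covariate).
params_ = pd.Series(
    [0.1, 0.5],
    index=pd.MultiIndex.from_tuples(
        [("lambda_1_", "Intercept"), ("lambda_2_", "Intercept")]
    ),
)

# Old: params_["lambda_1_"][0] — the trailing [0] relied on the
# deprecated positional fallback of Series.__getitem__.
# New: look up by label, then take the first element explicitly.
first = params_.loc["lambda_1_"].iloc[0]
assert first == 0.1
```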

lifelines/fitters/npmle.py

Lines changed: 1 addition & 1 deletion
@@ -291,7 +291,7 @@ def reconstruct_survival_function(

     # First backfill at events between known observations
     # Second fill all events _outside_ known obs with running_sum
-    return full_dataframe.combine_first(df).bfill().fillna(running_sum).clip(lower=0.0)
+    return full_dataframe.combine_first(df).astype(float).bfill().fillna(running_sum).clip(lower=0.0)


 def npmle_compute_confidence_intervals(left, right, mle_, alpha=0.05, samples=1000):
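
A plausible motivation for the added `.astype(float)`: `combine_first` on frames holding `None` placeholders can produce object-dtype columns, and pandas >= 2.1 deprecates silently downcasting object dtype inside `.bfill()`/`.fillna()`. Casting first keeps the fill purely numeric; a small sketch:

```python
import pandas as pd

# An object-dtype column, as can result from combining frames that
# contain None placeholders.
df = pd.DataFrame({"SF": [1.0, None, 0.25]}, dtype=object)

# pandas >= 2.1 warns about downcasting object dtype during .bfill();
# converting to float first avoids the deprecated path.
filled = df.astype(float).bfill()
assert filled["SF"].tolist() == [1.0, 0.25, 0.25]
```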

lifelines/fitters/piecewise_exponential_regression_fitter.py

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ def _add_penalty(self, params, neg_ll):
         coef_penalty = 0
         if self.penalizer > 0:
             for i in range(params_stacked.shape[1]):
-                if not self._cols_to_not_penalize[i]:
+                if not self._cols_to_not_penalize.iloc[i]:
                     coef_penalty = coef_penalty + (params_stacked[:, i]).var()

         return neg_ll + self.penalizer * coef_penalty
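
Same pandas 2.x indexing theme as in coxph_fitter.py: assuming `_cols_to_not_penalize` is a pandas Series keyed by covariate name (an assumption, not confirmed by the diff), `[i]` with an integer hits the deprecated positional fallback, while `.iloc[i]` is explicitly positional on any index:

```python
import pandas as pd

# Assumed shape: a boolean Series keyed by column name.
cols_to_not_penalize = pd.Series([False, True], index=["age", "_intercept"])

assert cols_to_not_penalize.iloc[1]  # positional access, index-agnostic
```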

lifelines/generate_datasets.py

Lines changed: 2 additions & 2 deletions
@@ -5,7 +5,7 @@

 from scipy import stats
 from scipy.optimize import newton
-from scipy.integrate import cumtrapz
+from scipy.integrate import cumulative_trapezoid

 random = np.random

@@ -308,7 +308,7 @@ def cumulative_integral(fx, x):
     fx: (n,d) numpy array, what you want to integral of
     x: (n,) numpy array, location to integrate over.
     """
-    return cumtrapz(fx.T, x, initial=0).T
+    return cumulative_trapezoid(fx.T, x, initial=0).T


 def construct_survival_curves(hazard_rates, timelines):
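
`cumtrapz` was renamed to `cumulative_trapezoid` in SciPy 1.6; the old name survived for a while as an alias before being deprecated and removed in recent SciPy releases, which is presumably why the dependency floor moves alongside this rename. The call itself is unchanged:

```python
import numpy as np
from scipy.integrate import cumulative_trapezoid

x = np.linspace(0.0, 1.0, 101)
fx = 2.0 * x  # derivative of x**2

# initial=0 prepends the starting value so the output has the same
# length as x, exactly as cumtrapz behaved.
F = cumulative_trapezoid(fx, x, initial=0)
assert np.allclose(F, x**2)
```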

lifelines/tests/test_estimation.py

Lines changed: 1 addition & 1 deletion
@@ -2008,7 +2008,7 @@ def test_joblib_serialization(self, rossi, regression_models):
     def test_fit_will_accept_object_dtype_as_event_col(self, regression_models_sans_strata_model, rossi):
         # issue #638
         rossi["arrest"] = rossi["arrest"].astype(object)
-        rossi["arrest"].iloc[0] = None
+        rossi.loc[0, "arrest"] = None

         assert rossi["arrest"].dtype == object
         rossi = rossi.dropna()
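
The replaced line was a chained assignment: `rossi["arrest"].iloc[0] = None` writes through an intermediate Series, which under pandas Copy-on-Write (opt-in in 2.x, the default in 3.0) never reaches the original frame. A single `.loc` call assigns directly; note this relies on the frame's default RangeIndex, where label 0 is also row 0:

```python
import pandas as pd

df = pd.DataFrame({"arrest": [1, 0, 1]}, dtype=object)

# Chained form: df["arrest"].iloc[0] = None  -> may modify a temporary
# copy only. One indexing operation updates the frame itself:
df.loc[0, "arrest"] = None
assert df["arrest"].isna().iloc[0]
```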

lifelines/tests/utils/test_utils.py

Lines changed: 1 addition & 1 deletion
@@ -347,7 +347,7 @@ def test_survival_table_from_events_at_risk_column():
         1.0,
     ]
     df = utils.survival_table_from_events(df["T"], df["E"])
-    assert list(df["at_risk"][1:]) == expected  # skip the first event as that is the birth time, 0.
+    assert list(df["at_risk"].loc[1:]) == expected  # skip the first event as that is the birth time, 0.


 def test_survival_table_to_events_casts_to_float():
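
`df["at_risk"][1:]` slices positionally through `Series.__getitem__`, which pandas 2.x deprecates; `.loc[1:]` slices by label, i.e. event times >= 1, which matches the comment's intent of dropping the birth-time row at t = 0. A sketch with a hypothetical mini survival table:

```python
import pandas as pd

# Index values are event times; 0.0 is the birth time.
at_risk = pd.Series([60, 60, 58], index=[0.0, 1.0, 2.0], name="at_risk")

# Label-based slice: keep rows whose event time is >= 1.
assert list(at_risk.loc[1:]) == [60, 58]
```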

lifelines/utils/__init__.py

Lines changed: 5 additions & 5 deletions
@@ -556,7 +556,7 @@ def _group_event_table_by_intervals(event_table, intervals) -> pd.DataFrame:

     intervals = np.arange(0, event_max + bin_width, bin_width)

-    event_table = event_table.groupby(pd.cut(event_table["event_at"], intervals, include_lowest=True)).agg(
+    event_table = event_table.groupby(pd.cut(event_table["event_at"], intervals, include_lowest=True), observed=False).agg(
         {"removed": ["sum"], "observed": ["sum"], "censored": ["sum"], "at_risk": ["max"]}
     )
     # convert columns from multiindex

@@ -648,7 +648,7 @@ def datetimes_to_durations(
         the units of time to use. See Pandas 'freq'. Default 'D' for days.
     dayfirst: bool, optional (default=False)
         see Pandas `to_datetime`
-    na_values : list, optional
+    na_values : list[str], optional
         list of values to recognize as NA/NaN. Ex: ['', 'NaT']
     format:
         see Pandas `to_datetime`

@@ -679,7 +679,7 @@ def datetimes_to_durations(
     start_times = pd.Series(start_times).copy()
     end_times = pd.Series(end_times).copy()

-    C = ~(pd.isnull(end_times).values | end_times.isin(na_values or [""]))
+    C = ~(pd.isnull(end_times).values | end_times.astype(str).isin(na_values or [""]))
     end_times[~C] = fill_date_
     start_times_ = pd.to_datetime(start_times, dayfirst=dayfirst, format=format)
     end_times_ = pd.to_datetime(end_times, dayfirst=dayfirst, errors="coerce", format=format)

@@ -1464,7 +1464,7 @@ def expand(df, cvs):
     cv = cv.sort_values([id_col, duration_col])
     cvs = cv.pipe(remove_redundant_rows).pipe(transform_cv_to_long_format).groupby(id_col, sort=True)

-    long_form_df = long_form_df.groupby(id_col, group_keys=False, sort=True).apply(expand, cvs=cvs)
+    long_form_df = long_form_df.groupby(id_col, group_keys=False, sort=True)[long_form_df.columns].apply(expand, cvs=cvs)
     return long_form_df.reset_index(drop=True)


@@ -1506,7 +1506,7 @@ def covariates_from_event_matrix(df, id_col) -> pd.DataFrame:
     """
     df = df.set_index(id_col)
     df = df.fillna(np.inf)
-    df = df.stack(dropna=False).reset_index()
+    df = df.stack(future_stack=True).reset_index()
     df.columns = [id_col, "event", "duration"]
     df["_counter"] = 1
     return (
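
This file collects several smaller pandas 2.x migrations: `observed=False` keeps the empty `pd.cut` bins that the old categorical-groupby default produced (pandas warns the default will flip to `observed=True`); `.astype(str)` makes the `na_values` membership test a string comparison regardless of the input dtype; selecting columns before `.apply` sidesteps the deprecation of `DataFrameGroupBy.apply` operating on the grouping columns; and `stack(future_stack=True)` opts into the new stack implementation, which keeps NaN rows (the old `dropna=False`) and drops the `dropna` argument. Two of these, sketched on toy data:

```python
import numpy as np
import pandas as pd

# observed=False: keep every interval bin, even empty ones.
events = pd.DataFrame({"event_at": [0.5, 1.5, 3.5], "observed": [1, 1, 0]})
bins = np.arange(0, 5, 1.0)
grouped = events.groupby(
    pd.cut(events["event_at"], bins), observed=False
).agg({"observed": "sum"})
assert len(grouped) == 4  # includes the empty (2.0, 3.0] bin

# future_stack=True: NaN rows survive the stack, as dropna=False once
# ensured.
wide = pd.DataFrame({"e1": [1.0, np.inf], "e2": [np.nan, 2.0]}, index=["id1", "id2"])
long = wide.stack(future_stack=True)
assert len(long) == 4
```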

lifelines/version.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals

-__version__ = "0.28.0"
+__version__ = "0.29.0"
