From 954058ef0415745f4b90da3a1ad6d32783c4bbd2 Mon Sep 17 00:00:00 2001 From: Max Kuhn Date: Mon, 15 May 2023 22:31:00 -0400 Subject: [PATCH 1/6] initial files --- .../statistics/survival-ipcw/figs/RKM-1.svg | 825 ++++++++++++++++++ .../survival-ipcw/figs/surv-plot-1.svg | 84 ++ .../survival-ipcw/figs/usable-data-1.svg | 79 ++ .../survival-ipcw/index.Rmarkdown.Rmd | 328 +++++++ .../survival-ipcw/index.Rmarkdown.html | 273 ++++++ 5 files changed, 1589 insertions(+) create mode 100644 content/learn/statistics/survival-ipcw/figs/RKM-1.svg create mode 100644 content/learn/statistics/survival-ipcw/figs/surv-plot-1.svg create mode 100644 content/learn/statistics/survival-ipcw/figs/usable-data-1.svg create mode 100644 content/learn/statistics/survival-ipcw/index.Rmarkdown.Rmd create mode 100644 content/learn/statistics/survival-ipcw/index.Rmarkdown.html diff --git a/content/learn/statistics/survival-ipcw/figs/RKM-1.svg b/content/learn/statistics/survival-ipcw/figs/RKM-1.svg new file mode 100644 index 00000000..3f4b484b --- /dev/null +++ b/content/learn/statistics/survival-ipcw/figs/RKM-1.svg @@ -0,0 +1,825 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.00 +0.25 +0.50 +0.75 +1.00 + + + + + + + + + +0 +5 +10 +15 +time +probability of being censored + + diff --git a/content/learn/statistics/survival-ipcw/figs/surv-plot-1.svg b/content/learn/statistics/survival-ipcw/figs/surv-plot-1.svg new file mode 100644 index 00000000..a083b00b --- /dev/null +++ b/content/learn/statistics/survival-ipcw/figs/surv-plot-1.svg @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0.00 +0.25 +0.50 +0.75 +1.00 + + + + + + + + + +0 +5 +10 +15 +time +survival probability + + diff --git a/content/learn/statistics/survival-ipcw/figs/usable-data-1.svg b/content/learn/statistics/survival-ipcw/figs/usable-data-1.svg new file mode 100644 index 00000000..32570a00 --- /dev/null +++ b/content/learn/statistics/survival-ipcw/figs/usable-data-1.svg @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +0 +50 +100 +150 +200 +250 + + + + + + + + + + +0 +5 +10 +15 +time +Number of Usable Samples + + diff --git a/content/learn/statistics/survival-ipcw/index.Rmarkdown.Rmd b/content/learn/statistics/survival-ipcw/index.Rmarkdown.Rmd new file mode 100644 index 00000000..161abbcc --- /dev/null +++ 
b/content/learn/statistics/survival-ipcw/index.Rmarkdown.Rmd @@ -0,0 +1,328 @@ +--- +title: "Understanding Inverse Probability of Censoring Weights (IPCW)" +tags: [survival,censored,parsnip] +categories: [statistical analysis] +type: learn-subsection +weight: 9 +description: | + Learn how tidymodels uses causal inference tools to measure performance of + survival models. +--- + +```{r setup, include = FALSE, message = FALSE, warning = FALSE} +source(here::here("content/learn/common.R")) +``` + +```{r load, include = FALSE} +library(tidymodels) +pkgs <- c("tidymodels", "censored", "prodlim") +theme_set(theme_bw() + theme(legend.position = "top")) +tidymodels_prefer() +options(pillar.advice = FALSE, pillar.min_title_chars = Inf) +``` + +## Introduction + + +One trend in modern survival analysis is to compute time-dependent measures of performance. These are primarily driven by an increased focus on predictions for the probability of survival at a given time (as opposed to the predictions of event times or linear predictors). Since these are conditional on the time of evaluation, we call the dynamic performance metrics. + +Many dynamic metrics are similar to, if not the same, as those used in binary classification models. Examples include the Brier score and ROC curves (that's another article though). In other words, at a given time for model evaluation, we try to encode the observed event time data into a binary "was there an event at time `t`?" version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50). + +The section below discusses the practical aspects of moving to a binary outcome and the statistical implications. Before we start, though, let's define the various types of times that will be mentioned: + +- Observed time: time recorded in the data +- Event time: observed times for actual events +- Evaluation time: the time, specified by the analyst, that the model is evaluated. 
+ +For example, we'll simulate some data using the methods of [Bender _et al_ (2005)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Generating+survival+times+to+simulate+Cox+proportional+hazards+models.%22&btnG=) using the prodlim package. A training and validation set are simulated. We'll also load the censored package so that we can fit a model to these time-to-event data: + +```{r} +#| label: data + +library(tidymodels) +library(censored) +library(prodlim) + +set.seed(5882) +sim_dat <- SimSurv(1000) %>% + mutate(event_time = Surv(time, event)) %>% + select(event_time, X1, X2) + +set.seed(2) +split <- initial_split(sim_dat) +sim_tr <- training(split) +sim_val <- testing(split) +``` + +We'll need a model to illustrate the code and concepts. Let's fit a bagged survival tree model to the training set: + +```{r} +#| label: bag-tree-fit +set.seed(17) +bag_tree_fit <- + bag_tree() %>% + set_mode("censored regression") %>% + set_engine("rpart") %>% + fit(event_time ~ ., data = sim_tr) +bag_tree_fit +``` + +Using this model, we'll be able to make predictions of different types. + +## Survival Probability Prediction + +This type of censored regression model can make static predictions via the predicted event time using `predict(object, type = "time")`. It can also make dynamic predictions regarding the probability of survival for each data point at specific times. The syntax for this is + +```r +predict(object, new_data, type = "survival", eval_time = numeric()) +``` + +where `eval_time` is a vector of time points at which we want the corresponding estimates of the survivor function. Alternately, we can use the `augment()` function to get both types of prediction and automatically attach them to the data being predicted. + +The largest event time in the training set is `r round(max(sim_tr$event_time[,1]), 3)` so we will use a set of evaluation times between zero and 18. 
From there, we use `augment()`: + +```{r} +#| label: val-pred + +time_points <- seq(0, 18, by = 0.25) + +val_pred <- augment(bag_tree_fit, sim_val, eval_time = time_points) +val_pred +``` + +The `.pred` column contains the dynamic predictions in a list column. Since `length(time_points)` is `r length(time_points)`, each data frame in the list column has that many rows: + +```{r} +#| label: val-pred-dynamic + +val_pred$.pred[[1]] +``` + +The `.pred_survival` column has the predictions. We'll see what the other columns represent in the next few sections. + +Let's plot the estimated survivor curves for the first 10 data points in the validation set: + +```{r} +#| label: surv-plot + +# Unnest the list columns: +dyn_val_pred <- + val_pred %>% + select(.pred, event_time) %>% + add_rowindex() %>% + unnest(.pred) + +dyn_val_pred %>% + filter(.row <= 10) %>% + ggplot(aes(.eval_time, .pred_survival, group = .row, col = factor(.row))) + + geom_step(direction = "vh", linewidth = 1, alpha = 1 / 2, show.legend = FALSE) + + labs(x = "time", y = "survival probability") +``` + +## Converting censored data to binary data + +We follow the process described by [Graf _et al_ (1999)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Assessment+and+Comparison+of+Prognostic+Classification+Schemes+for+Survival+Data.%22&btnG=) where observations at evaluation time `t` are categorized into three groups. + + - **Definitive events**: event times are less than the evaluation time ("it has already happened"). + - **Definitive non-events**: Observed times (censored or not) are greater than the evaluation time ("it happens sometime after now"). + - **Ambiguous outcomes**: Observed censored time is less than the evaluation time ("maybe it happens, maybe not"). + +We can use the first two sets of data to compute binary performance metrics, but the third set cannot be used. Note that the size of the third group is likely to grow as the evaluation time increases. 
This means we'll have fewer data points to evaluate with late evaluation times. This implies that the variation in the metrics will be considerable as time goes on. For our simulated training set: + +```{r} +#| label: usable-data +dyn_val_pred %>% + summarize(num_usable = sum(!is.na(.weight_censored)), .by = c(.eval_time)) %>% + ggplot() + + geom_step(aes(.eval_time, num_usable)) + + labs(x = "time", y = "Number of Usable Samples") + + lims(y = c(0, nrow(sim_val))) +``` + +Unfortunately, the categorization scheme shown above is not sufficient to compute metrics. Graf took a page from the causal inference literature and added a propensity-type score based on the likelihood that each data point would be censored (regardless of the observed event status). + +How do we compute this probability? It appears that the standard approach is to compute a "reverse Kaplan-Meier" (RKM) curve. Ordinarily, the Kaplan-Meier (KM) curve models the probability of an event (e.g., survival). The reverse curve models the probability of censoring (e.g., modeling non-events). This should give us a fairly reliable non-parametric model for estimating the probability of being censored at a given time. + +Every time a censored regression model is created using tidymodels, the RKM is estimated on the same data being used to fit the model and attached to the parsnip object. 
+ +For our simulated data, here is what the RKM curve looks like: + +```{r} +#| label: RKM +#| echo : false + +dyn_val_pred %>% + select(.weight_time, .pred_censored) %>% + distinct() %>% + ggplot() + + geom_step(aes(.weight_time, .pred_censored)) + + ylim(0:1) + + geom_rug( + data = sim_tr %>% filter(event_time[,2] == 1), + aes(x = event_time[,1]), + alpha = 1 / 2, col = "red" + ) + + geom_rug( + data = sim_tr %>% filter(event_time[,2] == 0), + aes(x = event_time[,1]), + alpha = 1 / 2, col = "blue", sides = "t" + ) + + labs(x = "time", y = "probability of being censored") +``` + +The red rug on the bottom shows the training point event times and the blue values at the top represent the times for the censored training set observations. + +For any dynamic computations, we multiply the contributions of the case by the inverse of the probability of being censored. This is called the inverse probability of censoring weights (IPCW). This should theoretically balance the exposure/effect/influence that the definitive observations have on performance calculations. + +### Some computational details + +First, when do we evaluate the probability of censoring? There are different approaches used in the literature, and we follow what Graf suggests (as it seems most appropriate): + + - If the observed time corresponds to an actual event, and that time is before the evaluation time, the probability of being censored is predicted at the observed time. + - If the observed time is after the evaluation time, regardless of the status, the probability of being censored is predicted at the evaluation time. + +Here's an example using the first data point in the validation set: + +```{r} +#| label: eval-time-censored +dyn_val_pred %>% + filter(.row == 1 & .eval_time %in% c(1, 5, 5.75, 10, 15)) %>% + select(event_time, .eval_time, .weight_time, .pred_censored, .weight_censored) +``` + +The observed event time was `r round(parsnip:::.extract_surv_time(sim_val$event_time[1]), 3)`. 
Up until that evaluation time, the probability of being censored is computed at the evaluation time. After that, it is based on the event time. + +We also slightly modify the time that the censoring probability is computed. If our evaluation time is today, we don't have today's data yet. In tidymodels, the probability of censoring is computed just before the requested evaluation time. We are basically subtracting a small numerical value from the evaluation time used in the RKM model. You'll only really see a difference if there is a bulk of censored observations at the original evaluation time. + +Finally, we use a simple RKM curve (i.e., no covariates or strata). This implies that there is non-informative censoring. Other applications of IPCW try to mitigate the effects of informative censoring. In the future, we may allow the model to include covariates (as well as models beyond the RKM). + +```{r} +time_as_binary_event <- function(surv, eval_time) { + event_time <- parsnip:::.extract_surv_time(surv) + status <- parsnip:::.extract_surv_status(surv) + is_event_before_t <- event_time <= eval_time & status == 1 + + # Three possible contributions to the statistic from Graf 1999 + # Censoring time before eval_time, no contribution (Graf category 3) + binary_res <- rep(NA_character_, length(event_time)) + + # A real event prior to eval_time (Graf category 1) + binary_res <- ifelse(is_event_before_t, "event", binary_res) + + # Observed time greater than eval_time (Graf category 2) + binary_res <- ifelse(event_time > eval_time, "non-event", binary_res) + factor(binary_res, levels = c("event", "non-event")) +} + +binary_encoding <- + dyn_val_pred %>% + mutate( + obs_class = time_as_binary_event(event_time, .eval_time), + pred_class = if_else(.pred_survival >= 1 / 2, "non-event", "event"), + pred_class = factor(pred_class, levels = c("event", "non-event")), + ) +``` + +While another article delves into the details of performance metrics for censored data, 
we'll look at the 2x2 confusion matrices at a few time points. + +Let's start with an evaluation time of 1.00. Ordinarily, we would simply use: + +```r +binary_encoding %>% + filter(.eval_time == 1.00) %>% + conf_mat(truth = obs_class, estimate = pred_class) +``` + +to compute the metric. This would ignore the censoring weights so we'll add the `case_weights` argument to `conf_mat()`: + +```{r} +#| label: conf-mat-01 +binary_encoding %>% + filter(.eval_time == 1.00) %>% + conf_mat(truth = obs_class, + estimate = pred_class, + case_weights = .weight_censored) +``` + + +```{r} +#| label: conf-mat-01-hide +#| include: false +dat_01 <- binary_encoding %>% filter(.eval_time == 1.00 & !is.na(.weight_censored)) +events_01 <- nrow(dat_01 %>% filter(event_time[,1] <= 1)) +``` + +The values in the cells are the sum of the censoring weights. There are `r events_01` actual events (out of `r nrow(dat_01)` usable values) before this evaluation time, so there are empty cells. Also note that the cell values are close to the actual counts. This early, the predicted censoring probabilities are very close to one, so their inverse values are also close to one. + +This early, performance looks very good but that is mostly since there are few events and they are all predicted well (at least with the default 50% cutoff). + +Let's shift to an evaluation time of 5.0. 
+ +```{r} +#| label: conf-mat-05 +binary_encoding %>% + filter(.eval_time == 5.00) %>% + conf_mat(truth = obs_class, + estimate = pred_class, + case_weights = .weight_censored) +``` + +```{r} +#| label: conf-mat-05-hide +#| include: false +dat_05 <- binary_encoding %>% filter(.eval_time == 5.00 & !is.na(.weight_censored)) +events_05 <- nrow(dat_05 %>% filter(event_time[,1] <= 5)) +cls_set <- metric_set(accuracy, sens, spec) +stats_05 <- + binary_encoding %>% + filter(.eval_time == 5.00) %>% + cls_set(truth = obs_class, + estimate = pred_class, + case_weights = .weight_censored) +``` + +Now we have fewer total observations to consider (`r nrow(dat_05)` instead of `r nrow(dat_01)` usable values) and more events (`r events_05` this time). Performance is fairly good; the sensitivity is `r round(stats_05$.estimate[2] * 100, 1)`% and the specificity is `r round(stats_05$.estimate[3] * 100, 1)`%. + +What happens when the evaluation time is 17? + +```{r} +#| label: conf-mat-17 +binary_encoding %>% + filter(.eval_time == 17.00) %>% + conf_mat(truth = obs_class, + estimate = pred_class, + case_weights = .weight_censored) +``` + +```{r} +#| label: conf-mat-17-hide +#| include: false +dat_17 <- binary_encoding %>% filter(.eval_time == 17.00 & !is.na(.weight_censored)) +events_17 <- nrow(dat_17 %>% filter(event_time[,1] <= 17)) +stats_17 <- + binary_encoding %>% + filter(.eval_time == 17.00) %>% + cls_set(truth = obs_class, + estimate = pred_class, + case_weights = .weight_censored) +``` + +The data are overwhelmingly events: `r events_17` out of `r nrow(dat_17)` observations. Also, the censoring weights are larger now since the probability of being censored is very low. The mean censoring weight is `r round(mean(dat_17$.weight_censored), 2)`. + +There's more on dynamic performance metrics in another article. + +## Summary + +When converting censored event time data to a binary format, the main points to remember are: + +* Some data points cannot be used in the calculations. 
+* To properly estimate statistical quantities, we weight the computations by the inverse of the probability of being censored. +* tidymodels currently assumes non-informative censoring. + + +## Session information + +```{r si, echo = FALSE} +small_session(pkgs) +``` + diff --git a/content/learn/statistics/survival-ipcw/index.Rmarkdown.html b/content/learn/statistics/survival-ipcw/index.Rmarkdown.html new file mode 100644 index 00000000..22f4cb1a --- /dev/null +++ b/content/learn/statistics/survival-ipcw/index.Rmarkdown.html @@ -0,0 +1,273 @@ +--- +title: "Understanding Inverse Probability of Censoring Weights (IPCW)" +tags: [survival,censored,parsnip] +categories: [statistical analysis] +type: learn-subsection +weight: 9 +description: | + Learn how tidymodels uses causal inference tools to measure performance of + survival models. +--- + + + +
+

Introduction

+

One trend in modern survival analysis is to compute time-dependent measures of performance. These are primarily driven by an increased focus on predictions for the probability of survival at a given time (as opposed to the predictions of event times or linear predictors). Since these are conditional on the time of evaluation, we call the dynamic performance metrics.

+

Many dynamic metrics are similar to, if not the same, as those used in binary classification models. Examples include the Brier score and ROC curves (that’s another article though). In other words, at a given time for model evaluation, we try to encode the observed event time data into a binary “was there an event at time t?” version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50).

+

The section below discusses the practical aspects of moving to a binary outcome and the statistical implications. Before we start, though, let’s define the various types of times that will be mentioned:

+ +

For example, we’ll simulate some data using the methods of Bender et al (2005) using the prodlim package. A training and validation set are simulated. We’ll also load the censored package so that we can fit a model to these time-to-event data:

+
library(tidymodels)
+library(censored)
+#> Loading required package: survival
+library(prodlim)
+
+set.seed(5882)
+sim_dat <- SimSurv(1000) %>%
+  mutate(event_time = Surv(time, event)) %>%
+  select(event_time, X1, X2)
+
+set.seed(2)
+split   <- initial_split(sim_dat)
+sim_tr  <- training(split)
+sim_val <- testing(split)
+

We’ll need a model to illustrate the code and concepts. Let’s fit a bagged survival tree model to the training set:

+
set.seed(17)
+bag_tree_fit <- 
+  bag_tree() %>% 
+  set_mode("censored regression") %>% 
+  set_engine("rpart") %>% 
+  fit(event_time ~ ., data = sim_tr)
+bag_tree_fit
+#> parsnip model object
+#> 
+#> 
+#> Bagging survival trees with 25 bootstrap replications 
+#> 
+#> Call: bagging.data.frame(formula = event_time ~ ., data = data)
+

Using this model, we’ll be able to make predictions of different types.

+
+
+

Survival Probability Prediction

+

This type of censored regression model can make static predictions via the predicted event time using predict(object, type = "time"). It can also make dynamic predictions regarding the probability of survival for each data point at specific times. The syntax for this is

+
predict(object, new_data, type = "survival", eval_time = numeric())
+

where eval_time is a vector of time points at which we want the corresponding estimates of the survivor function. Alternately, we can use the augment() function to get both types of prediction and automatically attach them to the data being predicted.

+

The largest event time in the training set is 18.1 so we will use a set of evaluation times between zero and 18. From there, we use augment():

+
time_points <- seq(0, 18, by = 0.25)
+
+val_pred <- augment(bag_tree_fit, sim_val, eval_time = time_points)
+val_pred
+#> # A tibble: 250 × 5
+#>    .pred             .pred_time event_time    X1      X2
+#>    <list>                 <dbl>     <Surv> <dbl>   <dbl>
+#>  1 <tibble [73 × 5]>       9.48      5.78      1 -0.630 
+#>  2 <tibble [73 × 5]>       6.67      3.26+     0  0.792 
+#>  3 <tibble [73 × 5]>       3.82      2.34      1  0.811 
+#>  4 <tibble [73 × 5]>       8.53      7.45+     1 -0.271 
+#>  5 <tibble [73 × 5]>       7.07      8.05      0  0.315 
+#>  6 <tibble [73 × 5]>       7.24     14.09      0  0.264 
+#>  7 <tibble [73 × 5]>      10.6       5.23+     0 -0.532 
+#>  8 <tibble [73 × 5]>      12.3       3.18+     0 -1.41  
+#>  9 <tibble [73 × 5]>      11.0       1.86      1 -0.851 
+#> 10 <tibble [73 × 5]>       6.03      7.20      1 -0.0937
+#> # ℹ 240 more rows
+

The .pred column contains the dynamic predictions in a list column. Since length(time_points) is 73, each data frame in the list column has that many rows:

+
val_pred$.pred[[1]]
+#> # A tibble: 73 × 5
+#>    .eval_time .pred_survival .weight_time .pred_censored .weight_censored
+#>         <dbl>          <dbl>        <dbl>          <dbl>            <dbl>
+#>  1       0             1            0              1                 1   
+#>  2       0.25          0.996        0.250          1                 1   
+#>  3       0.5           0.993        0.500          0.997             1.00
+#>  4       0.75          0.993        0.750          0.995             1.01
+#>  5       1             0.989        1.00           0.992             1.01
+#>  6       1.25          0.984        1.25           0.985             1.02
+#>  7       1.5           0.975        1.50           0.978             1.02
+#>  8       1.75          0.974        1.75           0.969             1.03
+#>  9       2             0.964        2.00           0.956             1.05
+#> 10       2.25          0.949        2.25           0.944             1.06
+#> # ℹ 63 more rows
+

The .pred_survival column has the predictions. We’ll see what the other columns represent in the next few sections.

+

Let’s plot the estimated survivor curves for the first 10 data points in the validation set:

+
# Unnest the list columns: 
+dyn_val_pred <- 
+  val_pred %>% 
+  select(.pred, event_time) %>% 
+  add_rowindex() %>% 
+  unnest(.pred) 
+
+dyn_val_pred %>% 
+  filter(.row <= 10) %>% 
+  ggplot(aes(.eval_time, .pred_survival, group = .row, col = factor(.row))) + 
+  geom_step(direction = "vh", linewidth = 1, alpha = 1 / 2, show.legend = FALSE) +
+  labs(x = "time", y = "survival probability")
+

+
+
+

Converting censored data to binary data

+

We follow the process described by Graf et al (1999) where observations at evaluation time t are categorized into three groups.

+ +

We can use the first two sets of data to compute binary performance metrics, but the third set cannot be used. Note that the size of the third group is likely to grow as the evaluation time increases. This means we’ll have fewer data points to evaluate with late evaluation times. This implies that the variation in the metrics will be considerable as time goes on. For our simulated training set:

+
dyn_val_pred %>% 
+  summarize(num_usable = sum(!is.na(.weight_censored)), .by = c(.eval_time)) %>% 
+  ggplot() + 
+  geom_step(aes(.eval_time, num_usable)) +
+  labs(x = "time", y = "Number of Usable Samples") +
+  lims(y = c(0, nrow(sim_val)))
+

+

Unfortunately, the categorization scheme shown above is not sufficient to compute metrics. Graf took a page from the causal inference literature and added a propensity-type score based on the likelihood that each data point would be censored (regardless of the observed event status).

+

How do we compute this probability? It appears that the standard approach is to compute a “reverse Kaplan-Meier” (RKM) curve. Ordinarily, the Kaplan-Meier (KM) curve models the probability of an event (e.g., survival). The reverse curve models the probability of censoring (e.g., modeling non-events). This should give us a fairly reliable non-parametric model for estimating the probability of being censored at a given time.

+

Every time a censored regression model is created using tidymodels, the RKM is estimated on the same data being used to fit the model and attached to the parsnip object.

+

For our simulated data, here is what the RKM curve looks like:

+

+

The red rug on the bottom shows the training point event times and the blue values at the top represent the times for the censored training set observations.

+

For any dynamic computations, we multiply the contributions of the case by the inverse of the probability of being censored. This is called the inverse probability of censoring weights (IPCW). This should theoretically balance the exposure/effect/influence that the definitive observations have on performance calculations.

+
+

Some computational details

+

First, when do we evaluate the probability of censoring? There are different approaches used in the literature, and we follow what Graf suggests (as it seems most appropriate):

+
    +
  • If the observed time corresponds to an actual event, and that time is before the evaluation time, the probability of being censored is predicted at the observed time.
  • +
  • If the observed time is after the evaluation time, regardless of the status, the probability of being censored is predicted at the evaluation time.
  • +
+

Here’s an example using the first data point in the validation set:

+
dyn_val_pred %>% 
+  filter(.row == 1 & .eval_time %in% c(1, 5, 5.75, 10, 15)) %>% 
+  select(event_time, .eval_time, .weight_time, .pred_censored, .weight_censored)
+#> # A tibble: 5 × 5
+#>   event_time .eval_time .weight_time .pred_censored .weight_censored
+#>       <Surv>      <dbl>        <dbl>          <dbl>            <dbl>
+#> 1       5.78       1            1.00          0.992             1.01
+#> 2       5.78       5            5.00          0.779             1.28
+#> 3       5.78       5.75         5.75          0.714             1.40
+#> 4       5.78      10            5.78          0.710             1.41
+#> 5       5.78      15            5.78          0.710             1.41
+

The observed event time was 5.779. Up until that evaluation time, the probability of being censored is computed at the evaluation time. After that, it is based on the event time.

+

We also slightly modify the time that the censoring probability is computed. If our evaluation time is today, we don’t have today’s data yet. In tidymodels, we calculate the probability of censoring is computed just before the requested evaluation time. We are basically subtracting a small numerical value from the evaluation time used in the RKM model. You’ll only really see a difference if there is a bulk of censored observations at the original evaluation time.

+

Finally, we use a simple RKM curve (i.e., no covariates or strata). This implies that there is non-informative censoring. Other applications of IPCW try to mitigate the effects of informative censoring. In the future, we may allow the model to include covariates (as well as models beyond the RKM).

+
time_as_binary_event <- function(surv, eval_time) {
+  event_time <- parsnip:::.extract_surv_time(surv)
+  status <- parsnip:::.extract_surv_status(surv)
+  is_event_before_t <- event_time <= eval_time & status == 1
+  
+  # Three possible contributions to the statistic from Graf 1999
+  # Censoring time before eval_time, no contribution (Graf category 3)
+  binary_res <- rep(NA_character_, length(event_time))
+  
+  # A real event prior to eval_time (Graf category 1)
+  binary_res <- ifelse(is_event_before_t, "event", binary_res)
+  
+  # Observed time greater than eval_time (Graf category 2)
+  binary_res <- ifelse(event_time > eval_time, "non-event", binary_res)
+  factor(binary_res, levels = c("event", "non-event"))
+}
+
+binary_encoding <- 
+  dyn_val_pred %>% 
+  mutate(
+    obs_class = time_as_binary_event(event_time, .eval_time),
+    pred_class = if_else(.pred_survival >= 1 / 2, "non-event", "event"),
+    pred_class = factor(pred_class, levels = c("event", "non-event")),
+  )
+

While another article delves into the details of performance metrics for censored data, we’ll look at the 2x2 confusion matrices at a few time points.

+

Let’s start with an evaluation time of 1.00. Ordinarily, we would simply use:

+
binary_encoding %>% 
+  filter(.eval_time == 1.00) %>% 
+  conf_mat(truth = obs_class, estimate = pred_class)
+

to compute the metric. This would ignore the censoring weights so we’ll add the case_weights argument to conf_mat():

+
#: label: conf-mat-01
+binary_encoding %>%
+  filter(.eval_time == 1.00) %>%
+  conf_mat(truth = obs_class,
+           estimate = pred_class,
+           case_weights = .weight_censored)
+#>            Truth
+#> Prediction   event non-event
+#>   event       0.00      0.00
+#>   non-event   6.03    244.99
+

The values in the cells are the sum of the censoring weights, There are 6 actual events (out of 249 usable values) before this evaluation time, so there are empty cells. Also note that the cell values are close to the actual counts. This early, the predicted censoring probabilities are very close to one so there inverse values are also.

+

This early, performance looks very good but that is mostly since there are few events and they are all predicted well (at least with the default 50% cutoff).

+

Let’s shift to an evaluation time of 5.0.

+
#: label: conf-mat-05
+binary_encoding %>%
+  filter(.eval_time == 5.00) %>%
+  conf_mat(truth = obs_class,
+           estimate = pred_class,
+           case_weights = .weight_censored)
+#>            Truth
+#> Prediction  event non-event
+#>   event      72.6      24.4
+#>   non-event  30.7     119.4
+

Now we have fewer total observations to consider (205 instead of 249 usable values) and more events (93 this time). Performance is fairly good; the sensitivity is 70.3% and the specificity is 83%.

+

What happens when the evaluation time is 17?

+
#: label: conf-mat-17
+binary_encoding %>%
+  filter(.eval_time == 17.00) %>%
+  conf_mat(truth = obs_class,
+           estimate = pred_class,
+           case_weights = .weight_censored)
+#>            Truth
+#> Prediction  event non-event
+#>   event     245.4      24.7
+#>   non-event   0.0       0.0
+

The data are overwhelmingly events: 148 out of 149 observations. Also, the censoring weights are larger now since the probability of being censored is very low. The mean censoring weight is 1.81.

+

There’s more on dynamic performance metrics in another article.

+
+
+
+

Summary

+

When converting censored event time data to a binary format, the main points to remember are:

+ +
+
+

Session information

+
#> ─ Session info ─────────────────────────────────────────────────────
+#>  setting  value
+#>  version  R version 4.2.3 (2023-03-15)
+#>  os       macOS Big Sur ... 10.16
+#>  system   x86_64, darwin17.0
+#>  ui       X11
+#>  language (EN)
+#>  collate  en_US.UTF-8
+#>  ctype    en_US.UTF-8
+#>  tz       America/New_York
+#>  date     2023-05-15
+#>  pandoc   3.1.1 @ /usr/local/bin/ (via rmarkdown)
+#> 
+#> ─ Packages ─────────────────────────────────────────────────────────
+#>  package    * version    date (UTC) lib source
+#>  broom      * 1.0.4      2023-03-11 [1] CRAN (R 4.2.0)
+#>  censored   * 0.1.1.9003 2023-04-07 [1] Github (tidymodels/censored@b78d6a9)
+#>  dials      * 1.2.0      2023-04-03 [1] CRAN (R 4.2.0)
+#>  dplyr      * 1.1.2      2023-04-20 [1] CRAN (R 4.2.0)
+#>  ggplot2    * 3.4.2      2023-04-03 [1] CRAN (R 4.2.0)
+#>  infer      * 1.0.4      2022-12-02 [1] CRAN (R 4.2.0)
+#>  parsnip    * 1.1.0.9001 2023-05-15 [1] Github (tidymodels/parsnip@ab42409)
+#>  prodlim    * 2023.03.31 2023-04-02 [1] CRAN (R 4.2.0)
+#>  purrr      * 1.0.1      2023-01-10 [1] CRAN (R 4.2.0)
+#>  recipes    * 1.0.6      2023-04-25 [1] CRAN (R 4.2.0)
+#>  rlang        1.1.1      2023-04-28 [1] CRAN (R 4.2.0)
+#>  rsample    * 1.1.1      2022-12-07 [1] CRAN (R 4.2.0)
+#>  tibble     * 3.2.1      2023-03-20 [1] CRAN (R 4.2.0)
+#>  tidymodels * 1.0.0      2022-07-13 [1] CRAN (R 4.2.0)
+#>  tune       * 1.1.1.9001 2023-05-15 [1] Github (tidymodels/tune@fdc0016)
+#>  workflows  * 1.1.3      2023-02-22 [1] CRAN (R 4.2.0)
+#>  yardstick  * 1.2.0      2023-04-21 [1] CRAN (R 4.2.0)
+#> 
+#>  [1] /Users/max/Library/R/x86_64/4.2/library
+#>  [2] /Library/Frameworks/R.framework/Versions/4.2/Resources/library
+#> 
+#> ────────────────────────────────────────────────────────────────────
+
From 2a2a0ee3d45bfd37827d341fd7a9762c7dbd5b99 Mon Sep 17 00:00:00 2001 From: Max Kuhn Date: Tue, 16 May 2023 09:53:00 -0400 Subject: [PATCH 2/6] text updates --- .../{index.Rmarkdown.Rmd => index.Rmarkdown} | 47 ++- .../survival-ipcw/index.Rmarkdown.html | 273 ------------- .../statistics/survival-ipcw/index.markdown | 372 ++++++++++++++++++ 3 files changed, 400 insertions(+), 292 deletions(-) rename content/learn/statistics/survival-ipcw/{index.Rmarkdown.Rmd => index.Rmarkdown} (81%) delete mode 100644 content/learn/statistics/survival-ipcw/index.Rmarkdown.html create mode 100644 content/learn/statistics/survival-ipcw/index.markdown diff --git a/content/learn/statistics/survival-ipcw/index.Rmarkdown.Rmd b/content/learn/statistics/survival-ipcw/index.Rmarkdown similarity index 81% rename from content/learn/statistics/survival-ipcw/index.Rmarkdown.Rmd rename to content/learn/statistics/survival-ipcw/index.Rmarkdown index 161abbcc..99d3f9d8 100644 --- a/content/learn/statistics/survival-ipcw/index.Rmarkdown.Rmd +++ b/content/learn/statistics/survival-ipcw/index.Rmarkdown @@ -21,12 +21,20 @@ tidymodels_prefer() options(pillar.advice = FALSE, pillar.min_title_chars = Inf) ``` +`r req_pkgs(pkgs)` You'll need the development versions of censored and parsnip. To install these, use + +```r +library(pak) + +pak::pak(c("tidymodels/censored", "tidymodels/parsnip")) +``` + ## Introduction One trend in modern survival analysis is to compute time-dependent measures of performance. These are primarily driven by an increased focus on predictions for the probability of survival at a given time (as opposed to the predictions of event times or linear predictors). Since these are conditional on the time of evaluation, we call the dynamic performance metrics. -Many dynamic metrics are similar to, if not the same, as those used in binary classification models. Examples include the Brier score and ROC curves (that's another article though). 
In other words, at a given time for model evaluation, we try to encode the observed event time data into a binary "was there an event at time `t`?" version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50). +Many dynamic metrics are similar to, if not the same, as those used in binary classification models. Examples include the Brier score and ROC curves (that's [another article]() though). In other words, at a given time for model evaluation, we try to encode the observed event time data into a binary "was there an event at time `t`?" version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50). The section below discusses the practical aspects of moving to a binary outcome and the statistical implications. Before we start, though, let's define the various types of times that will be mentioned: @@ -34,7 +42,7 @@ The section below discusses the practical aspects of moving to a binary outcome - Event time: observed times for actual events - Evaluation time: the time, specified by the analyst, that the model is evaluated. -For example, we'll simulate some data using the methods of [Bender _et al_ (2005)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Generating+survival+times+to+simulate+Cox+proportional+hazards+models.%22&btnG=) using the prodlim package. A training and validation set are simulated. We'll also load the censored package so that we can fit a model to these time-to-event data: +For example, we'll simulate some data using the methods of [Bender _et al_ (2005)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Generating+survival+times+to+simulate+Cox+proportional+hazards+models.%22&btnG=) using the prodlim package. A training and validation sets are simulated. 
We'll also load the censored package so that we can fit a model to these time-to-event data: ```{r} #| label: data @@ -67,17 +75,17 @@ bag_tree_fit <- bag_tree_fit ``` -Using this model, we'll be able to make predictions of different types. +Using this model, we'll can make predictions of different types. ## Survival Probability Prediction -This type of censored regression model can make static predictions via the predicted event time using `predict(object, type = "time")`. It can also make dynamic predictions regarding the probability of survival for each data point at specific times. The syntax for this is +This censored regression model can make static predictions via the predicted event time using `predict(object, type = "time")`. It can also create dynamic predictions regarding the probability of survival for each data point at specific times. The syntax for this is ```r predict(object, new_data, type = "survival", eval_time = numeric()) ``` -where `eval_time` is a vector of time points at which we want the corresponding estimates of the survivor function. Alternately, we can use the `augment()` function to get both types of prediction and automatically attach them to the data being predicted. +where `eval_time` is a vector of time points at which we want the corresponding survivor function estimates. Alternatively, we can use the `augment()` function to get both types of prediction and automatically attach them to the data. The largest event time in the training set is `r round(max(sim_tr$event_time[,1]), 3)` so we will use a set of evaluation times between zero and 18. From there, we use `augment()`: @@ -98,7 +106,7 @@ The `.pred` column contains the dynamic predictions in a list column. Since `len val_pred$.pred[[1]] ``` -The `.pred_survival` column has the predictions. We'll see what the other columns represent in the next few sections. +The `.pred_survival` column has the predictions. 
We'll see what the other columns represent in the following few sections. Let's plot the estimated survivor curves for the first 10 data points in the validation set: @@ -141,9 +149,9 @@ dyn_val_pred %>% Unfortunately, the categorization scheme shown above is not sufficient to compute metrics. Graf took a page from the causal inference literature and added a propensity-type score based on the likelihood that each data point would be censored (regardless of the observed event status). -How do we compute this probability? It appears that the standard approach is to compute a "reverse Kaplan-Meier" (RKM) curve. Ordinarily, the Kaplan-Meier (KM) curve models the probability of an event (e.g., survival). The reverse curve models the probability of censoring (e.g., modeling non-events). This should give us a fairly reliable non-parametric model for estimating the probability of being censored at a given time. +How do we compute this probability? The standard approach is to compute a "reverse Kaplan-Meier" (RKM) curve. Ordinarily, the Kaplan-Meier (KM) curve models the probability of an event (e.g., survival). The reverse curve models the probability of censoring (e.g., modeling non-events). This should give us a reasonably reliable non-parametric model for estimating the probability of being censored at a given time. -Every time a censored regression model is created using tidymodels, the RKM is estimated on the same data being used to fit the model and attached to the parsnip object. +Every time a censored regression model is created using tidymodels, the RKM is estimated on the same data used to fit the model and attached to the parsnip object. For our simulated data, here is what the RKM curve looks like: @@ -172,11 +180,11 @@ dyn_val_pred %>% The red rug on the bottom shows the training point event times and the blue values at the top represent the times for the censored training set observations. 
-For any dynamic computations, we multiply the contributions of the case by the inverse of the probability of being censored. This is called the inverse probability of censoring weights (IPCW). This should theoretically balance the exposure/effect/influence that the definitive observations have on performance calculations. +For any dynamic computations, we multiply the case's contributions by the inverse probability of being censored. This is called the inverse probability of censoring weights (IPCW). This should theoretically balance the exposure/effect/influence that the definitive observations have on performance calculations. ### Some computational details -First, when do we evaluate the probability of censoring? There are different approaches used in the literature, and we follow what Graf suggests (as it seems most appropriate): +First, when do we evaluate the probability of censoring? There are different approaches used in the literature, and we follow what Graf suggests (as it seems most appropriate): - If the observed time corresponds to an actual event, and that time is before the evaluation time, the probability of being censored is predicted at the observed time. - If the observed time is after the evaluation time, regardless of the status, the probability of being censored is predicted at the evaluation time. @@ -197,8 +205,9 @@ We also slightly modify the time that the censoring probability is computed. If Finally, we use a simple RKM curve (i.e., no covariates or strata). This implies that there is non-informative censoring. Other applications of IPCW try to mitigate the effects of informative censoring. In the future, we may allow the model to include covariates (as well as models beyond the RKM). 
```{r} +# TODO I'm going to add this to parsnip time_as_binary_event <- function(surv, eval_time) { - event_time <- parsnip:::.extract_surv_time(surv) + event_time <- parsnip:::.extract_surv_time(surv) # TODO we should export these status <- parsnip:::.extract_surv_status(surv) is_event_before_t <- event_time <= eval_time & status == 1 @@ -207,10 +216,10 @@ time_as_binary_event <- function(surv, eval_time) { binary_res <- rep(NA_character_, length(event_time)) # A real event prior to eval_time (Graf category 1) - binary_res <- ifelse(is_event_before_t, "event", binary_res) + binary_res <- if_else(is_event_before_t, "event", binary_res) # Observed time greater than eval_time (Graf category 2) - binary_res <- ifelse(event_time > eval_time, "non-event", binary_res) + binary_res <- if_else(event_time > eval_time, "non-event", binary_res) factor(binary_res, levels = c("event", "non-event")) } @@ -233,10 +242,10 @@ binary_encoding %>% conf_mat(truth = obs_class, estimate = pred_class) ``` -to compute the metric. This would ignore the censoring weights so we'll add the `case_weights` argument to `conf_mat()`: +to compute the metric. This would ignore the censoring weights so we'll add the `case_weights` argument: ```{r} -#: label: conf-mat-01 +#| label: conf-mat-01 binary_encoding %>% filter(.eval_time == 1.00) %>% conf_mat(truth = obs_class, @@ -259,7 +268,7 @@ This early, performance looks very good but that is mostly since there are few e Let's shift to an evaluation time of 5.0. ```{r} -#: label: conf-mat-05 +#| label: conf-mat-05 binary_encoding %>% filter(.eval_time == 5.00) %>% conf_mat(truth = obs_class, @@ -286,7 +295,7 @@ Now we have fewer total observations to consider (`r nrow(dat_05)` instead of `r What happends when the evaluation time is 17? 
```{r} -#: label: conf-mat-17 +#| label: conf-mat-17 binary_encoding %>% filter(.eval_time == 17.00) %>% conf_mat(truth = obs_class, @@ -307,9 +316,9 @@ stats_17 <- case_weights = .weight_censored) ``` -The data are overwhelmingly events: `r events_17` out of `r nrow(dat_17)` observations. Also, the censoring weights are larger now since the probability of being censored is very low. The mean censoring weight is `r round(mean(dat_17$.weight_censored), 2)`. +The data are overwhelmingly events. Also, the censoring weights are larger now since the probability of being censored is very low. The mean censoring weight is `r round(mean(dat_17$.weight_censored), 2)`. -There's more on dynamic performance metrics in another article. +There's more on dynamic performance metrics in [another article](). ## Summary diff --git a/content/learn/statistics/survival-ipcw/index.Rmarkdown.html b/content/learn/statistics/survival-ipcw/index.Rmarkdown.html deleted file mode 100644 index 22f4cb1a..00000000 --- a/content/learn/statistics/survival-ipcw/index.Rmarkdown.html +++ /dev/null @@ -1,273 +0,0 @@ ---- -title: "Understanding Inverse Probability of Censoring Weights (IPCW)" -tags: [survival,censored,parsnip] -categories: [statistical analysis] -type: learn-subsection -weight: 9 -description: | - Learn how tidymodels uses causal inference tools to measure performance of - survival models. ---- - - - -
-

Introduction

-

One trend in modern survival analysis is to compute time-dependent measures of performance. These are primarily driven by an increased focus on predictions for the probability of survival at a given time (as opposed to the predictions of event times or linear predictors). Since these are conditional on the time of evaluation, we call them dynamic performance metrics.

-

Many dynamic metrics are similar to, if not the same, as those used in binary classification models. Examples include the Brier score and ROC curves (that’s another article though). In other words, at a given time for model evaluation, we try to encode the observed event time data into a binary “was there an event at time t?” version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50).

-

The section below discusses the practical aspects of moving to a binary outcome and the statistical implications. Before we start, though, let’s define the various types of times that will be mentioned:

-
    -
  • Observed time: time recorded in the data
  • -
  • Event time: observed times for actual events
  • -
  • Evaluation time: the time, specified by the analyst, that the model is evaluated.
  • -
-

For example, we’ll simulate some data using the methods of Bender et al (2005) using the prodlim package. A training and validation set are simulated. We’ll also load the censored package so that we can fit a model to these time-to-event data:

-
library(tidymodels)
-library(censored)
-#> Loading required package: survival
-library(prodlim)
-
-set.seed(5882)
-sim_dat <- SimSurv(1000) %>%
-  mutate(event_time = Surv(time, event)) %>%
-  select(event_time, X1, X2)
-
-set.seed(2)
-split   <- initial_split(sim_dat)
-sim_tr  <- training(split)
-sim_val <- testing(split)
-

We’ll need a model to illustrate the code and concepts. Let’s fit a bagged survival tree model to the training set:

-
set.seed(17)
-bag_tree_fit <- 
-  bag_tree() %>% 
-  set_mode("censored regression") %>% 
-  set_engine("rpart") %>% 
-  fit(event_time ~ ., data = sim_tr)
-bag_tree_fit
-#> parsnip model object
-#> 
-#> 
-#> Bagging survival trees with 25 bootstrap replications 
-#> 
-#> Call: bagging.data.frame(formula = event_time ~ ., data = data)
-

Using this model, we’ll be able to make predictions of different types.

-
-
-

Survival Probability Prediction

-

This type of censored regression model can make static predictions via the predicted event time using predict(object, type = "time"). It can also make dynamic predictions regarding the probability of survival for each data point at specific times. The syntax for this is

-
predict(object, new_data, type = "survival", eval_time = numeric())
-

where eval_time is a vector of time points at which we want the corresponding estimates of the survivor function. Alternatively, we can use the augment() function to get both types of prediction and automatically attach them to the data being predicted.

-

The largest event time in the training set is 18.1 so we will use a set of evaluation times between zero and 18. From there, we use augment():

-
time_points <- seq(0, 18, by = 0.25)
-
-val_pred <- augment(bag_tree_fit, sim_val, eval_time = time_points)
-val_pred
-#> # A tibble: 250 × 5
-#>    .pred             .pred_time event_time    X1      X2
-#>    <list>                 <dbl>     <Surv> <dbl>   <dbl>
-#>  1 <tibble [73 × 5]>       9.48      5.78      1 -0.630 
-#>  2 <tibble [73 × 5]>       6.67      3.26+     0  0.792 
-#>  3 <tibble [73 × 5]>       3.82      2.34      1  0.811 
-#>  4 <tibble [73 × 5]>       8.53      7.45+     1 -0.271 
-#>  5 <tibble [73 × 5]>       7.07      8.05      0  0.315 
-#>  6 <tibble [73 × 5]>       7.24     14.09      0  0.264 
-#>  7 <tibble [73 × 5]>      10.6       5.23+     0 -0.532 
-#>  8 <tibble [73 × 5]>      12.3       3.18+     0 -1.41  
-#>  9 <tibble [73 × 5]>      11.0       1.86      1 -0.851 
-#> 10 <tibble [73 × 5]>       6.03      7.20      1 -0.0937
-#> # ℹ 240 more rows
-

The .pred column contains the dynamic predictions in a list column. Since length(time_points) is 73, each data frame in the list column has that many rows:

-
val_pred$.pred[[1]]
-#> # A tibble: 73 × 5
-#>    .eval_time .pred_survival .weight_time .pred_censored .weight_censored
-#>         <dbl>          <dbl>        <dbl>          <dbl>            <dbl>
-#>  1       0             1            0              1                 1   
-#>  2       0.25          0.996        0.250          1                 1   
-#>  3       0.5           0.993        0.500          0.997             1.00
-#>  4       0.75          0.993        0.750          0.995             1.01
-#>  5       1             0.989        1.00           0.992             1.01
-#>  6       1.25          0.984        1.25           0.985             1.02
-#>  7       1.5           0.975        1.50           0.978             1.02
-#>  8       1.75          0.974        1.75           0.969             1.03
-#>  9       2             0.964        2.00           0.956             1.05
-#> 10       2.25          0.949        2.25           0.944             1.06
-#> # ℹ 63 more rows
-

The .pred_survival column has the predictions. We’ll see what the other columns represent in the next few sections.

-

Let’s plot the estimated survivor curves for the first 10 data points in the validation set:

-
# Unnest the list columns: 
-dyn_val_pred <- 
-  val_pred %>% 
-  select(.pred, event_time) %>% 
-  add_rowindex() %>% 
-  unnest(.pred) 
-
-dyn_val_pred %>% 
-  filter(.row <= 10) %>% 
-  ggplot(aes(.eval_time, .pred_survival, group = .row, col = factor(.row))) + 
-  geom_step(direction = "vh", linewidth = 1, alpha = 1 / 2, show.legend = FALSE) +
-  labs(x = "time", y = "survival probability")
-

-
-
-

Converting censored data to binary data

-

We follow the process described by Graf et al (1999) where observations at evaluation time t are categorized into three groups.

-
    -
  • Definitive non-events: event times are less than the evaluation time (“it hasn’t happened yet”)
  • -
  • Definitive events: Observed times (censored or not) are greater than the evaluation time (“it happens sometime after now”).
  • -
  • Ambiguous outcomes: Observed censored time is less than the evaluation time (“maybe it happens, maybe not”).
  • -
-

We can use the first two sets of data to compute binary performance metrics, but the third set cannot be used. Note that the size of the third group is likely to grow as the evaluation time increases. This means we’ll have fewer data points to evaluate with late evaluation times. This implies that the variation in the metrics will be considerable as time goes on. For our simulated training set:

-
dyn_val_pred %>% 
-  summarize(num_usable = sum(!is.na(.weight_censored)), .by = c(.eval_time)) %>% 
-  ggplot() + 
-  geom_step(aes(.eval_time, num_usable)) +
-  labs(x = "time", y = "Number of Usable Samples") +
-  lims(y = c(0, nrow(sim_val)))
-

-

Unfortunately, the categorization scheme shown above is not sufficient to compute metrics. Graf took a page from the causal inference literature and added a propensity-type score based on the likelihood that each data point would be censored (regardless of the observed event status).

-

How do we compute this probability? It appears that the standard approach is to compute a “reverse Kaplan-Meier” (RKM) curve. Ordinarily, the Kaplan-Meier (KM) curve models the probability of an event (e.g., survival). The reverse curve models the probability of censoring (e.g., modeling non-events). This should give us a fairly reliable non-parametric model for estimating the probability of being censored at a given time.

-

Every time a censored regression model is created using tidymodels, the RKM is estimated on the same data being used to fit the model and attached to the parsnip object.

-

For our simulated data, here is what the RKM curve looks like:

-

-

The red rug on the bottom shows the training point event times and the blue values at the top represent the times for the censored training set observations.

-

For any dynamic computations, we multiply the contributions of the case by the inverse of the probability of being censored. This is called the inverse probability of censoring weights (IPCW). This should theoretically balance the exposure/effect/influence that the definitive observations have on performance calculations.

-
-

Some computational details

-

First, when do we evaluate the probability of censoring? There are different approaches used in the literature, and we follow what Graf suggests (as it seems most appropriate):

-
    -
  • If the observed time corresponds to an actual event, and that time is before the evaluation time, the probability of being censored is predicted at the observed time.
  • -
  • If the observed time is after the evaluation time, regardless of the status, the probability of being censored is predicted at the evaluation time.
  • -
-

Here’s an example using the first data point in the validation set:

-
dyn_val_pred %>% 
-  filter(.row == 1 & .eval_time %in% c(1, 5, 5.75, 10, 15)) %>% 
-  select(event_time, .eval_time, .weight_time, .pred_censored, .weight_censored)
-#> # A tibble: 5 × 5
-#>   event_time .eval_time .weight_time .pred_censored .weight_censored
-#>       <Surv>      <dbl>        <dbl>          <dbl>            <dbl>
-#> 1       5.78       1            1.00          0.992             1.01
-#> 2       5.78       5            5.00          0.779             1.28
-#> 3       5.78       5.75         5.75          0.714             1.40
-#> 4       5.78      10            5.78          0.710             1.41
-#> 5       5.78      15            5.78          0.710             1.41
-

The observed event time was 5.779. Up until that evaluation time, the probability of being censored is computed at the evaluation time. After that, it is based on the event time.

-

We also slightly modify the time that the censoring probability is computed. If our evaluation time is today, we don’t have today’s data yet. In tidymodels, the probability of censoring is computed just before the requested evaluation time. We are basically subtracting a small numerical value from the evaluation time used in the RKM model. You’ll only really see a difference if there is a bulk of censored observations at the original evaluation time.

-

Finally, we use a simple RKM curve (i.e., no covariates or strata). This implies that there is non-informative censoring. Other applications of IPCW try to mitigate the effects of informative censoring. In the future, we may allow the model to include covariates (as well as models beyond the RKM).

-
time_as_binary_event <- function(surv, eval_time) {
-  event_time <- parsnip:::.extract_surv_time(surv)
-  status <- parsnip:::.extract_surv_status(surv)
-  is_event_before_t <- event_time <= eval_time & status == 1
-  
-  # Three possible contributions to the statistic from Graf 1999
-  # Censoring time before eval_time, no contribution (Graf category 3)
-  binary_res <- rep(NA_character_, length(event_time))
-  
-  # A real event prior to eval_time (Graf category 1)
-  binary_res <- ifelse(is_event_before_t, "event", binary_res)
-  
-  # Observed time greater than eval_time (Graf category 2)
-  binary_res <- ifelse(event_time > eval_time, "non-event", binary_res)
-  factor(binary_res, levels = c("event", "non-event"))
-}
-
-binary_encoding <- 
-  dyn_val_pred %>% 
-  mutate(
-    obs_class = time_as_binary_event(event_time, .eval_time),
-    pred_class = if_else(.pred_survival >= 1 / 2, "non-event", "event"),
-    pred_class = factor(pred_class, levels = c("event", "non-event")),
-  )
-

While another article delves into the details of performance metrics for censored data, we’ll look at the 2x2 confusion matrices at a few time points.

-

Let’s start with an evaluation time of 1.00. Ordinarily, we would simply use:

-
binary_encoding %>% 
-  filter(.eval_time == 1.00) %>% 
-  conf_mat(truth = obs_class, estimate = pred_class)
-

to compute the metric. This would ignore the censoring weights so we’ll add the case_weights argument to conf_mat():

-
#: label: conf-mat-01
-binary_encoding %>%
-  filter(.eval_time == 1.00) %>%
-  conf_mat(truth = obs_class,
-           estimate = pred_class,
-           case_weights = .weight_censored)
-#>            Truth
-#> Prediction   event non-event
-#>   event       0.00      0.00
-#>   non-event   6.03    244.99
-

The values in the cells are the sum of the censoring weights. There are 6 actual events (out of 249 usable values) before this evaluation time, so there are empty cells. Also note that the cell values are close to the actual counts. This early, the predicted censoring probabilities are very close to one, so their inverse values are also close to one.

-

This early, performance looks very good, but that is mostly because there are few events and they are all predicted well (at least with the default 50% cutoff).

-

Let’s shift to an evaluation time of 5.0.

-
#: label: conf-mat-05
-binary_encoding %>%
-  filter(.eval_time == 5.00) %>%
-  conf_mat(truth = obs_class,
-           estimate = pred_class,
-           case_weights = .weight_censored)
-#>            Truth
-#> Prediction  event non-event
-#>   event      72.6      24.4
-#>   non-event  30.7     119.4
-

Now we have fewer total observations to consider (205 instead of 249 usable values) and more events (93 this time). Performance is fairly good; the sensitivity is 70.3% and the specificity is 83%.

-

What happens when the evaluation time is 17?

-
#: label: conf-mat-17
-binary_encoding %>%
-  filter(.eval_time == 17.00) %>%
-  conf_mat(truth = obs_class,
-           estimate = pred_class,
-           case_weights = .weight_censored)
-#>            Truth
-#> Prediction  event non-event
-#>   event     245.4      24.7
-#>   non-event   0.0       0.0
-

The data are overwhelmingly events: 148 out of 149 observations. Also, the censoring weights are larger now since the probability of being censored is very low. The mean censoring weight is 1.81.

-

There’s more on dynamic performance metrics in another article.

-
-
-
-

Summary

-

When converting censored event time data to a binary format, the main points to remember are:

-
    -
  • Some data points cannot be used in the calculations.
  • -
  • To properly estimate statistical quantities, we weight the computations by the inverse of the probability of being censored.
  • -
  • tidymodels currently assumes non-informative censoring.
  • -
-
-
-

Session information

-
#> ─ Session info ─────────────────────────────────────────────────────
-#>  setting  value
-#>  version  R version 4.2.3 (2023-03-15)
-#>  os       macOS Big Sur ... 10.16
-#>  system   x86_64, darwin17.0
-#>  ui       X11
-#>  language (EN)
-#>  collate  en_US.UTF-8
-#>  ctype    en_US.UTF-8
-#>  tz       America/New_York
-#>  date     2023-05-15
-#>  pandoc   3.1.1 @ /usr/local/bin/ (via rmarkdown)
-#> 
-#> ─ Packages ─────────────────────────────────────────────────────────
-#>  package    * version    date (UTC) lib source
-#>  broom      * 1.0.4      2023-03-11 [1] CRAN (R 4.2.0)
-#>  censored   * 0.1.1.9003 2023-04-07 [1] Github (tidymodels/censored@b78d6a9)
-#>  dials      * 1.2.0      2023-04-03 [1] CRAN (R 4.2.0)
-#>  dplyr      * 1.1.2      2023-04-20 [1] CRAN (R 4.2.0)
-#>  ggplot2    * 3.4.2      2023-04-03 [1] CRAN (R 4.2.0)
-#>  infer      * 1.0.4      2022-12-02 [1] CRAN (R 4.2.0)
-#>  parsnip    * 1.1.0.9001 2023-05-15 [1] Github (tidymodels/parsnip@ab42409)
-#>  prodlim    * 2023.03.31 2023-04-02 [1] CRAN (R 4.2.0)
-#>  purrr      * 1.0.1      2023-01-10 [1] CRAN (R 4.2.0)
-#>  recipes    * 1.0.6      2023-04-25 [1] CRAN (R 4.2.0)
-#>  rlang        1.1.1      2023-04-28 [1] CRAN (R 4.2.0)
-#>  rsample    * 1.1.1      2022-12-07 [1] CRAN (R 4.2.0)
-#>  tibble     * 3.2.1      2023-03-20 [1] CRAN (R 4.2.0)
-#>  tidymodels * 1.0.0      2022-07-13 [1] CRAN (R 4.2.0)
-#>  tune       * 1.1.1.9001 2023-05-15 [1] Github (tidymodels/tune@fdc0016)
-#>  workflows  * 1.1.3      2023-02-22 [1] CRAN (R 4.2.0)
-#>  yardstick  * 1.2.0      2023-04-21 [1] CRAN (R 4.2.0)
-#> 
-#>  [1] /Users/max/Library/R/x86_64/4.2/library
-#>  [2] /Library/Frameworks/R.framework/Versions/4.2/Resources/library
-#> 
-#> ────────────────────────────────────────────────────────────────────
-
diff --git a/content/learn/statistics/survival-ipcw/index.markdown b/content/learn/statistics/survival-ipcw/index.markdown new file mode 100644 index 00000000..6c5d257b --- /dev/null +++ b/content/learn/statistics/survival-ipcw/index.markdown @@ -0,0 +1,372 @@ +--- +title: "Understanding Inverse Probability of Censoring Weights (IPCW)" +tags: [survival,censored,parsnip] +categories: [statistical analysis] +type: learn-subsection +weight: 9 +description: | + Learn how tidymodels uses causal inference tools to measure performance of + survival models. +--- + + + + + +To use the code in this article, you will need to install the following packages: censored, prodlim, and tidymodels. You'll need the development versions of censored and parsnip. To install these, use + +```r +library(pak) + +pak::pak(c("tidymodels/censored", "tidymodels/parsnip")) +``` + +## Introduction + + +One trend in modern survival analysis is to compute time-dependent measures of performance. These are primarily driven by an increased focus on predictions for the probability of survival at a given time (as opposed to the predictions of event times or linear predictors). Since these are conditional on the time of evaluation, we call them dynamic performance metrics. + +Many dynamic metrics are similar to, if not the same, as those used in binary classification models. Examples include the Brier score and ROC curves (that's [another article]() though). In other words, at a given time for model evaluation, we try to encode the observed event time data into a binary "was there an event at time `t`?" version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50). + +The section below discusses the practical aspects of moving to a binary outcome and the statistical implications. 
Before we start, though, let's define the various types of times that will be mentioned: + +- Observed time: time recorded in the data +- Event time: observed times for actual events +- Evaluation time: the time, specified by the analyst, that the model is evaluated. + +For example, we'll simulate some data using the methods of [Bender _et al_ (2005)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Generating+survival+times+to+simulate+Cox+proportional+hazards+models.%22&btnG=) using the prodlim package. A training and a validation set are simulated. We'll also load the censored package so that we can fit a model to these time-to-event data: + + +```r +library(tidymodels) +library(censored) +#> Loading required package: survival +library(prodlim) + +set.seed(5882) +sim_dat <- SimSurv(1000) %>% + mutate(event_time = Surv(time, event)) %>% + select(event_time, X1, X2) + +set.seed(2) +split <- initial_split(sim_dat) +sim_tr <- training(split) +sim_val <- testing(split) +``` + +We'll need a model to illustrate the code and concepts. Let's fit a bagged survival tree model to the training set: + + +```r +set.seed(17) +bag_tree_fit <- + bag_tree() %>% + set_mode("censored regression") %>% + set_engine("rpart") %>% + fit(event_time ~ ., data = sim_tr) +bag_tree_fit +#> parsnip model object +#> +#> +#> Bagging survival trees with 25 bootstrap replications +#> +#> Call: bagging.data.frame(formula = event_time ~ ., data = data) +``` + +Using this model, we can make predictions of different types. + +## Survival Probability Prediction + +This censored regression model can make static predictions via the predicted event time using `predict(object, type = "time")`. It can also create dynamic predictions regarding the probability of survival for each data point at specific times. 
The syntax for this is + +```r +predict(object, new_data, type = "survival", eval_time = numeric()) +``` + +where `eval_time` is a vector of time points at which we want the corresponding survivor function estimates. Alternatively, we can use the `augment()` function to get both types of prediction and automatically attach them to the data. + +The largest event time in the training set is 18.1 so we will use a set of evaluation times between zero and 18. From there, we use `augment()`: + + +```r +time_points <- seq(0, 18, by = 0.25) + +val_pred <- augment(bag_tree_fit, sim_val, eval_time = time_points) +val_pred +#> # A tibble: 250 × 5 +#> .pred .pred_time event_time X1 X2 +#> +#> 1 9.48 5.78 1 -0.630 +#> 2 6.67 3.26+ 0 0.792 +#> 3 3.82 2.34 1 0.811 +#> 4 8.53 7.45+ 1 -0.271 +#> 5 7.07 8.05 0 0.315 +#> 6 7.24 14.09 0 0.264 +#> 7 10.6 5.23+ 0 -0.532 +#> 8 12.3 3.18+ 0 -1.41 +#> 9 11.0 1.86 1 -0.851 +#> 10 6.03 7.20 1 -0.0937 +#> # ℹ 240 more rows +``` + +The `.pred` column contains the dynamic predictions in a list column. Since `length(time_points)` is 73, each data frame in the list column has that many rows: + + +```r +val_pred$.pred[[1]] +#> # A tibble: 73 × 5 +#> .eval_time .pred_survival .weight_time .pred_censored .weight_censored +#> +#> 1 0 1 0 1 1 +#> 2 0.25 0.996 0.250 1 1 +#> 3 0.5 0.993 0.500 0.997 1.00 +#> 4 0.75 0.993 0.750 0.995 1.01 +#> 5 1 0.989 1.00 0.992 1.01 +#> 6 1.25 0.984 1.25 0.985 1.02 +#> 7 1.5 0.975 1.50 0.978 1.02 +#> 8 1.75 0.974 1.75 0.969 1.03 +#> 9 2 0.964 2.00 0.956 1.05 +#> 10 2.25 0.949 2.25 0.944 1.06 +#> # ℹ 63 more rows +``` + +The `.pred_survival` column has the predictions. We'll see what the other columns represent in the following few sections. 
+ +Let's plot the estimated survivor curves for the first 10 data points in the validation set: + + +```r +# Unnest the list columns: +dyn_val_pred <- + val_pred %>% + select(.pred, event_time) %>% + add_rowindex() %>% + unnest(.pred) + +dyn_val_pred %>% + filter(.row <= 10) %>% + ggplot(aes(.eval_time, .pred_survival, group = .row, col = factor(.row))) + + geom_step(direction = "vh", linewidth = 1, alpha = 1 / 2, show.legend = FALSE) + + labs(x = "time", y = "survival probability") +``` + + + +## Converting censored data to binary data + +We follow the process described by [Graf _et al_ (1999)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Assessment+and+Comparison+of+Prognostic+Classification+Schemes+for+Survival+Data.%22&btnG=) where observations at evaluation time `t` are categorized into three groups. + + - **Definitive non-events**: event times are less than the evaluation time ("it hasn't happened yet") + - **Definitive events**: Observed times (censored or not) are greater than the evaluation time ("it happens sometime after now"). + - **Ambiguous outcomes**: Observed censored time is less than the evaluation time ("maybe it happens, maybe not"). + +We can use the first two sets of data to compute binary performance metrics, but the third set cannot be used. Note that the size of the third group is likely to grow as the evaluation time increases. This means we'll have fewer data points to evaluate with late evaluation times. This implies that the variation in the metrics will be considerable as time goes on. For our simulated training set: + + +```r +dyn_val_pred %>% + summarize(num_usable = sum(!is.na(.weight_censored)), .by = c(.eval_time)) %>% + ggplot() + + geom_step(aes(.eval_time, num_usable)) + + labs(x = "time", y = "Number of Usable Samples") + + lims(y = c(0, nrow(sim_val))) +``` + + + +Unfortunately, the categorization scheme shown above is not sufficient to compute metrics. 
Graf took a page from the causal inference literature and added a propensity-type score based on the likelihood that each data point would be censored (regardless of the observed event status). + +How do we compute this probability? The standard approach is to compute a "reverse Kaplan-Meier" (RKM) curve. Ordinarily, the Kaplan-Meier (KM) curve models the probability of an event (e.g., survival). The reverse curve models the probability of censoring (e.g., modeling non-events). This should give us a reasonably reliable non-parametric model for estimating the probability of being censored at a given time. + +Every time a censored regression model is created using tidymodels, the RKM is estimated on the same data used to fit the model and attached to the parsnip object. + +For our simulated data, here is what the RKM curve looks like: + + + +The red rug on the bottom shows the training point event times and the blue values at the top represent the times for the censored training set observations. + +For any dynamic computations, we multiply the case's contributions by the inverse probability of being censored. This is called the inverse probability of censoring weights (IPCW). This should theoretically balance the exposure/effect/influence that the definitive observations have on performance calculations. + +### Some computational details + +First, when do we evaluate the probability of censoring? There are different approaches used in the literature, and we follow what Graf suggests (as it seems most appropriate): + + - If the observed time corresponds to an actual event, and that time is before the evaluation time, the probability of being censored is predicted at the observed time. + - If the observed time is after the evaluation time, regardless of the status, the probability of being censored is predicted at the evaluation time. 
+ +Here's an example using the first data point in the validation set: + + +```r +dyn_val_pred %>% + filter(.row == 1 & .eval_time %in% c(1, 5, 5.75, 10, 15)) %>% + select(event_time, .eval_time, .weight_time, .pred_censored, .weight_censored) +#> # A tibble: 5 × 5 +#> event_time .eval_time .weight_time .pred_censored .weight_censored +#> +#> 1 5.78 1 1.00 0.992 1.01 +#> 2 5.78 5 5.00 0.779 1.28 +#> 3 5.78 5.75 5.75 0.714 1.40 +#> 4 5.78 10 5.78 0.710 1.41 +#> 5 5.78 15 5.78 0.710 1.41 +``` + +The observed event time was 5.779. Up until that evaluation time, the probability of being censored is computed at the evaluation time. After that, it is based on the event time. + +We also slightly modify the time that the censoring probability is computed. If our evaluation time is today, we don't have today's data yet. In tidymodels, we calculate the probability of censoring just before the requested evaluation time. We are basically subtracting a small numerical value from the evaluation time used in the RKM model. You'll only really see a difference if there is a bulk of censored observations at the original evaluation time. + +Finally, we use a simple RKM curve (i.e., no covariates or strata). This implies that there is non-informative censoring. Other applications of IPCW try to mitigate the effects of informative censoring. In the future, we may allow the model to include covariates (as well as models beyond the RKM). 
+ + +```r +# TODO I'm going to add this to parsnip +time_as_binary_event <- function(surv, eval_time) { + event_time <- parsnip:::.extract_surv_time(surv) # TODO we should export these + status <- parsnip:::.extract_surv_status(surv) + is_event_before_t <- event_time <= eval_time & status == 1 + + # Three possible contributions to the statistic from Graf 1999 + # Censoring time before eval_time, no contribution (Graf category 3) + binary_res <- rep(NA_character_, length(event_time)) + + # A real event prior to eval_time (Graf category 1) + binary_res <- if_else(is_event_before_t, "event", binary_res) + + # Observed time greater than eval_time (Graf category 2) + binary_res <- if_else(event_time > eval_time, "non-event", binary_res) + factor(binary_res, levels = c("event", "non-event")) +} + +binary_encoding <- + dyn_val_pred %>% + mutate( + obs_class = time_as_binary_event(event_time, .eval_time), + pred_class = if_else(.pred_survival >= 1 / 2, "non-event", "event"), + pred_class = factor(pred_class, levels = c("event", "non-event")), + ) +``` + +While another article delves into the details of performance metrics for censored data, we'll look at the 2x2 confusion matrices at a few time points. + +Let's start with an evaluation time of 1.00. Ordinarily, we would simply use: + +```r +binary_encoding %>% + filter(.eval_time == 1.00) %>% + conf_mat(truth = obs_class, estimate = pred_class) +``` + +to compute the metric. This would ignore the censoring weights so we'll add the `case_weights` argument: + + +```r +binary_encoding %>% + filter(.eval_time == 1.00) %>% + conf_mat(truth = obs_class, + estimate = pred_class, + case_weights = .weight_censored) +#> Truth +#> Prediction event non-event +#> event 0.00 0.00 +#> non-event 6.03 244.99 +``` + + + + +The values in the cells are the sum of the censoring weights. There are 6 actual events (out of 249 usable values) before this evaluation time, so there are empty cells. 
Also note that the cell values are close to the actual counts. This early, the predicted censoring probabilities are very close to one so their inverse values are as well. + +This early, performance looks very good but that is mostly because there are few events and they are all predicted well (at least with the default 50% cutoff). + +Let's shift to an evaluation time of 5.0. + + +```r +binary_encoding %>% + filter(.eval_time == 5.00) %>% + conf_mat(truth = obs_class, + estimate = pred_class, + case_weights = .weight_censored) +#> Truth +#> Prediction event non-event +#> event 72.6 24.4 +#> non-event 30.7 119.4 +``` + + + +Now we have fewer total observations to consider (205 instead of 249 usable values) and more events (93 this time). Performance is fairly good; the sensitivity is 70.3% and the specificity is 83%. + +What happens when the evaluation time is 17? + + +```r +binary_encoding %>% + filter(.eval_time == 17.00) %>% + conf_mat(truth = obs_class, + estimate = pred_class, + case_weights = .weight_censored) +#> Truth +#> Prediction event non-event +#> event 245.4 24.7 +#> non-event 0.0 0.0 +``` + + + +The data are overwhelmingly events. Also, the censoring weights are larger now since the probability of being censored is very low. The mean censoring weight is 1.81. + +There's more on dynamic performance metrics in [another article](). + +## Summary + +When converting censored event time data to a binary format, the main points to remember are: + +* Some data points cannot be used in the calculations. +* To properly estimate statistical quantities, we weight the computations by the inverse of the probability of being censored. +* tidymodels currently assumes non-informative censoring. 
+ + +## Session information + + +``` +#> ─ Session info ───────────────────────────────────────────────────── +#> setting value +#> version R version 4.2.0 (2022-04-22) +#> os macOS Big Sur/Monterey 10.16 +#> system x86_64, darwin17.0 +#> ui X11 +#> language (EN) +#> collate en_US.UTF-8 +#> ctype en_US.UTF-8 +#> tz America/New_York +#> date 2023-05-16 +#> pandoc 3.1.1 @ /Applications/RStudio.app/Contents/Resources/app/quarto/bin/tools/ (via rmarkdown) +#> +#> ─ Packages ───────────────────────────────────────────────────────── +#> package * version date (UTC) lib source +#> broom * 1.0.4 2023-03-11 [1] CRAN (R 4.2.0) +#> censored * 0.2.0.9000 2023-05-16 [1] Github (tidymodels/censored@f9eccb6) +#> dials * 1.2.0 2023-04-03 [1] CRAN (R 4.2.0) +#> dplyr * 1.1.2 2023-04-20 [1] CRAN (R 4.2.0) +#> ggplot2 * 3.4.2 2023-04-03 [1] CRAN (R 4.2.0) +#> infer * 1.0.4 2022-12-02 [1] CRAN (R 4.2.0) +#> parsnip * 1.1.0.9001 2023-05-16 [1] Github (tidymodels/parsnip@ab42409) +#> prodlim * 2023.03.31 2023-04-02 [1] CRAN (R 4.2.0) +#> purrr * 1.0.1 2023-01-10 [1] CRAN (R 4.2.0) +#> recipes * 1.0.6 2023-04-25 [1] CRAN (R 4.2.0) +#> rlang 1.1.1 2023-04-28 [1] CRAN (R 4.2.0) +#> rsample * 1.1.1 2022-12-07 [1] CRAN (R 4.2.0) +#> tibble * 3.2.1 2023-03-20 [1] CRAN (R 4.2.0) +#> tidymodels * 1.1.0 2023-05-01 [1] CRAN (R 4.2.0) +#> tune * 1.1.1.9001 2023-05-16 [1] Github (tidymodels/tune@fdc0016) +#> workflows * 1.1.3 2023-02-22 [1] CRAN (R 4.2.0) +#> yardstick * 1.2.0 2023-04-21 [1] CRAN (R 4.2.0) +#> +#> [1] /Library/Frameworks/R.framework/Versions/4.2/Resources/library +#> +#> ──────────────────────────────────────────────────────────────────── +``` + From 5c869d5f47b536ba0f8efbe9aaefade4359a3ecc Mon Sep 17 00:00:00 2001 From: Hannah Frick Date: Mon, 22 May 2023 10:24:44 +0100 Subject: [PATCH 3/6] typos --- content/learn/statistics/survival-ipcw/index.Rmarkdown | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git 
a/content/learn/statistics/survival-ipcw/index.Rmarkdown b/content/learn/statistics/survival-ipcw/index.Rmarkdown index 99d3f9d8..cdab9db2 100644 --- a/content/learn/statistics/survival-ipcw/index.Rmarkdown +++ b/content/learn/statistics/survival-ipcw/index.Rmarkdown @@ -32,11 +32,11 @@ pak::pak(c("tidymodels/censored", "tidymodels/parsnip")) ## Introduction -One trend in modern survival analysis is to compute time-dependent measures of performance. These are primarily driven by an increased focus on predictions for the probability of survival at a given time (as opposed to the predictions of event times or linear predictors). Since these are conditional on the time of evaluation, we call the dynamic performance metrics. +One trend in modern survival analysis is to compute time-dependent measures of performance. These are primarily driven by an increased focus on predictions for the probability of survival at a given time (as opposed to the predictions of event times or linear predictors). Since these are conditional on the time of evaluation, we call them dynamic performance metrics. Many dynamic metrics are similar to, if not the same, as those used in binary classification models. Examples include the Brier score and ROC curves (that's [another article]() though). In other words, at a given time for model evaluation, we try to encode the observed event time data into a binary "was there an event at time `t`?" version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50). -The section below discusses the practical aspects of moving to a binary outcome and the statistical implications. Before we start, though, let's define the various types of times that will be mentioned: +The sections below discuss the practical aspects of moving to a binary outcome and the statistical implications. 
To start, let's define the various types of times that will be mentioned: - Observed time: time recorded in the data - Event time: observed times for actual events @@ -75,7 +75,7 @@ bag_tree_fit <- bag_tree_fit ``` -Using this model, we'll can make predictions of different types. +Using this model, we can make predictions of different types. ## Survival Probability Prediction @@ -200,7 +200,7 @@ dyn_val_pred %>% The observed event time was `r round(parsnip:::.extract_surv_time(sim_val$event_time[1]), 3)`. Up until that evaluation time, the probability of being censored is computed at the evaluation time. After that, it is based on the event time. -We also slightly modify the time that the censoring probability is computed. If our evaluation time is today, we don't have today's data yet. In tidymodels, we calculate the probability of censoring is computed just before the requested evaluation time. We are basically subtracting a small numerical value from the evaluation time used in the RKM model. You'll only really see a difference if there is a bulk of censored observations at the original evaluation time. +We also slightly modify the time that the censoring probability is computed. If our evaluation time is today, we don't have today's data yet. In tidymodels, we calculate the probability of censoring just before the requested evaluation time. We are basically subtracting a small numerical value from the evaluation time used in the RKM model. You'll only really see a difference if there is a bulk of censored observations at the original evaluation time. Finally, we use a simple RKM curve (i.e., no covariates or strata). This implies that there is non-informative censoring. Other applications of IPCW try to mitigate the effects of informative censoring. In the future, we may allow the model to include covariates (as well as models beyond the RKM). @@ -322,7 +322,7 @@ There's more on dynamic performance metrics in [another article](). 
## Summary -When converting censored event time data to a binary format, the main point to remember are: +When converting censored event time data to a binary format, the main points to remember are: * Some data points cannot be used in the calculations. * To properly estimate statistical quantities, we weight the computations by the inverse of the probability of being censored. From 72ecf178eb619c79fb614c8b6e146b535fd81d14 Mon Sep 17 00:00:00 2001 From: Hannah Frick Date: Mon, 22 May 2023 16:39:33 +0100 Subject: [PATCH 4/6] Apply suggestions from code review --- .../statistics/survival-ipcw/index.Rmarkdown | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/content/learn/statistics/survival-ipcw/index.Rmarkdown b/content/learn/statistics/survival-ipcw/index.Rmarkdown index cdab9db2..e6c7e7d9 100644 --- a/content/learn/statistics/survival-ipcw/index.Rmarkdown +++ b/content/learn/statistics/survival-ipcw/index.Rmarkdown @@ -24,7 +24,7 @@ options(pillar.advice = FALSE, pillar.min_title_chars = Inf) `r req_pkgs(pkgs)` You'll need the development versions of censored and parsnip. To install these, use ```r -library(pak) +#install.packages(pak) pak::pak(c("tidymodels/censored", "tidymodels/parsnip")) ``` @@ -34,7 +34,7 @@ pak::pak(c("tidymodels/censored", "tidymodels/parsnip")) One trend in modern survival analysis is to compute time-dependent measures of performance. These are primarily driven by an increased focus on predictions for the probability of survival at a given time (as opposed to the predictions of event times or linear predictors). Since these are conditional on the time of evaluation, we call them dynamic performance metrics. -Many dynamic metrics are similar to, if not the same, as those used in binary classification models. Examples include the Brier score and ROC curves (that's [another article]() though). 
In other words, at a given time for model evaluation, we try to encode the observed event time data into a binary "was there an event at time `t`?" version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50). +Many dynamic metrics are similar to, if not the same, as those used in binary classification models. Examples include the Brier score and ROC curves (that's [another article](FIXME) though). In other words, for a given time `t` for model evaluation, we try to encode the observed event time data into a binary "was there an event at time `t`?" version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50). The sections below discuss the practical aspects of moving to a binary outcome and the statistical implications. To start, let's define the various types of times that will be mentioned: @@ -42,7 +42,7 @@ The sections below discuss the practical aspects of moving to a binary outcome a - Event time: observed times for actual events - Evaluation time: the time, specified by the analyst, that the model is evaluated. -For example, we'll simulate some data using the methods of [Bender _et al_ (2005)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Generating+survival+times+to+simulate+Cox+proportional+hazards+models.%22&btnG=) using the prodlim package. A training and validation sets are simulated. We'll also load the censored package so that we can fit a model to these time-to-event data: +As an example, we'll simulate some data with the prodlim package, using the methods of [Bender _et al_ (2005)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Generating+survival+times+to+simulate+Cox+proportional+hazards+models.%22&btnG=). A training and a validation set are simulated. 
We'll also load the censored package so that we can fit a model to these time-to-event data: ```{r} #| label: data @@ -87,7 +87,7 @@ predict(object, new_data, type = "survival", eval_time = numeric()) where `eval_time` is a vector of time points at which we want the corresponding survivor function estimates. Alternatively, we can use the `augment()` function to get both types of prediction and automatically attach them to the data. -The largest event time in the training set is `r round(max(sim_tr$event_time[,1]), 3)` so we will use a set of evaluation times between zero and 18. From there, we use `augment()`: +The largest event time in the training set is `r round(max(sim_tr$event_time[,1]), 3)` so we will use a set of evaluation times between zero and 18. ```{r} #| label: val-pred @@ -129,13 +129,13 @@ dyn_val_pred %>% ## Converting censored data to binary data -We follow the process described by [Graf _et al_ (1999)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Assessment+and+Comparison+of+Prognostic+Classification+Schemes+for+Survival+Data.%22&btnG=) where observations at evaluation time `t` are categorized into three groups. +To assess model performance at evaluation time `t`, we turn the observed event time data into a binary “was there an event at time t?” version. For this, we follow the process described by [Graf _et al_ (1999)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Assessment+and+Comparison+of+Prognostic+Classification+Schemes+for+Survival+Data.%22&btnG=) where observations at evaluation time `t` are categorized into three groups. - - **Definitive non-events**: event times are less than the evaluation time ("it hasn't happened yet") + - **Definitive non-events**: event times are less than the evaluation time ("it has already happened") - **Definitive events**: Observed times (censored or not) are greater than the evaluation time ("it happens sometime after now"). 
- **Ambiguous outcomes**: Observed censored time is less than the evaluation time ("maybe it happens, maybe not"). -We can use the first two sets of data to compute binary performance metrics, but the third set cannot be used. Note that the size of the third group is likely to grow as the evaluation time increases. This means we'll have fewer data points to evaluate with late evaluation times. This implies that the variation in the metrics will be considerable as time goes on. For our simulated training set: +We can use the first two sets of data to compute binary performance metrics, but the third set cannot be used. Note that the size of the third group is likely to grow as the evaluation time increases. This means we'll have fewer data points to evaluate with late evaluation times. This implies that the variation in the metrics will be considerable as evaluation time goes on. For our simulated training set: ```{r} #| label: usable-data @@ -143,11 +143,11 @@ dyn_val_pred %>% summarize(num_usable = sum(!is.na(.weight_censored)), .by = c(.eval_time)) %>% ggplot() + geom_step(aes(.eval_time, num_usable)) + - labs(x = "time", y = "Number of Usable Samples") + + labs(x = "time", y = "number of usable samples") + lims(y = c(0, nrow(sim_val))) ``` -Unfortunately, the categorization scheme shown above is not sufficient to compute metrics. Graf took a page from the causal inference literature and added a propensity-type score based on the likelihood that each data point would be censored (regardless of the observed event status). +Unfortunately, the categorization scheme shown above is not sufficient to compute metrics. Graf _et al_ took a page from the causal inference literature and added a propensity-type score based on the likelihood that each data point would be censored (regardless of the observed event status). How do we compute this probability? The standard approach is to compute a "reverse Kaplan-Meier" (RKM) curve. 
Ordinarily, the Kaplan-Meier (KM) curve models the probability of an event (e.g., survival). The reverse curve models the probability of censoring (e.g., modeling non-events). This should give us a reasonably reliable non-parametric model for estimating the probability of being censored at a given time. @@ -184,7 +184,7 @@ For any dynamic computations, we multiply the case's contributions by the invers ### Some computational details -First, when do we evaluate the probability of censoring? There are different approaches used in the literature, and we follow what Graf suggests (as it seems most appropriate): +First, when do we evaluate the probability of censoring? There are different approaches used in the literature, and we follow what Graf _et al_ suggest (as it seems most appropriate): - If the observed time corresponds to an actual event, and that time is before the evaluation time, the probability of being censored is predicted at the observed time. - If the observed time is after the evaluation time, regardless of the status, the probability of being censored is predicted at the evaluation time. @@ -198,11 +198,11 @@ dyn_val_pred %>% select(event_time, .eval_time, .weight_time, .pred_censored, .weight_censored) ``` -The observed event time was `r round(parsnip:::.extract_surv_time(sim_val$event_time[1]), 3)`. Up until that evaluation time, the probability of being censored is computed at the evaluation time. After that, it is based on the event time. +This observation was an event, observed at time `r round(parsnip:::.extract_surv_time(sim_val$event_time[1]), 3)`. The column `.weight_time` captures at which time the probability of censoring was calculated. Up until that evaluation time, the probability of being censored is computed at the evaluation time. After that, it is based on the event time. We also slightly modify the time that the censoring probability is computed. If our evaluation time is today, we don't have today's data yet. 
In tidymodels, we calculate the probability of censoring just before the requested evaluation time. We are basically subtracting a small numerical value from the evaluation time used in the RKM model. You'll only really see a difference if there is a bulk of censored observations at the original evaluation time. -Finally, we use a simple RKM curve (i.e., no covariates or strata). This implies that there is non-informative censoring. Other applications of IPCW try to mitigate the effects of informative censoring. In the future, we may allow the model to include covariates (as well as models beyond the RKM). +Finally, we use a simple RKM curve (i.e., no covariates or strata). This implies that there is non-informative censoring. Other applications of IPCW try to mitigate the effects of informative censoring. In the future, we may allow the censoring model to include covariates (as well as models beyond the RKM). ```{r} # TODO I'm going to add this to parsnip @@ -232,7 +232,7 @@ binary_encoding <- ) ``` -While another article delves into the details of performance metrics for censored data, we'll look at the 2x2 confusion matrices at a few time points. +To illustrate how the IPCW are used in calculating dynamic performance metrics, we'll take a look here at the 2x2 confusion matrices at a few time points. More details on performance metrics for censored data are in a separate [tidymodels.org article](FIXME). Let's start with an evaluation time of 1.00. Ordinarily, we would simply use: @@ -242,7 +242,7 @@ binary_encoding %>% conf_mat(truth = obs_class, estimate = pred_class) ``` -to compute the metric. This would ignore the censoring weights so we'll add the `case_weights` argument: +to compute the metric. 
This would ignore the censoring weights so we'll include them via the `case_weights` argument: ```{r} #| label: conf-mat-01 @@ -263,7 +263,7 @@ events_01 <- nrow(dat_01 %>% filter(event_time[,1] <= 1)) The values in the cells are the sum of the censoring weights, There are `r events_01` actual events (out of `r nrow(dat_01)` usable values) before this evaluation time, so there are empty cells. Also note that the cell values are close to the actual counts. This early, the predicted censoring probabilities are very close to one so there inverse values are also. -This early, performance looks very good but that is mostly since there are few events and they are all predicted well (at least with the default 50% cutoff). +This early, performance looks very good but that is mostly because there are few events. Let's shift to an evaluation time of 5.0. From ad2f676cadde73847c7872bb7fa7fb2501e0de91 Mon Sep 17 00:00:00 2001 From: Hannah Frick Date: Mon, 5 Jun 2023 16:42:43 +0100 Subject: [PATCH 5/6] focus on conversion to binary and weights --- .../statistics/survival-ipcw/figs/RKM-1.svg | 2261 +++++++++++------ .../figs/plot-graf-categories-1.svg | 212 ++ .../survival-ipcw/figs/surv-plot-1.svg | 84 - .../survival-ipcw/figs/usable-data-1.svg | 43 +- .../statistics/survival-ipcw/index.Rmarkdown | 197 +- .../statistics/survival-ipcw/index.markdown | 244 +- 6 files changed, 1987 insertions(+), 1054 deletions(-) create mode 100644 content/learn/statistics/survival-ipcw/figs/plot-graf-categories-1.svg delete mode 100644 content/learn/statistics/survival-ipcw/figs/surv-plot-1.svg diff --git a/content/learn/statistics/survival-ipcw/figs/RKM-1.svg b/content/learn/statistics/survival-ipcw/figs/RKM-1.svg index 3f4b484b..5552accd 100644 --- a/content/learn/statistics/survival-ipcw/figs/RKM-1.svg +++ b/content/learn/statistics/survival-ipcw/figs/RKM-1.svg @@ -34,770 +34,1521 @@ - - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - 
- - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + @@ -812,13 +1563,15 @@ - - - + + + + 0 -5 -10 -15 +5 +10 +15 +20 time probability of being censored diff --git a/content/learn/statistics/survival-ipcw/figs/plot-graf-categories-1.svg b/content/learn/statistics/survival-ipcw/figs/plot-graf-categories-1.svg new file mode 100644 index 00000000..9af78453 --- /dev/null +++ b/content/learn/statistics/survival-ipcw/figs/plot-graf-categories-1.svg @@ -0,0 +1,212 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +1 + + + + + + + + + + +3 + + + + + + + + + + +5 + + + + + + +0 +2 +4 +6 + + + + +0 +2 +4 +6 + + + + +0 +2 +4 +6 + + + + + +Time +Sample + +Status + + + + + + + + + + + + +Observation: censored +Observation: event +Evaluation: non-event +Evaluation: event + + diff --git a/content/learn/statistics/survival-ipcw/figs/surv-plot-1.svg b/content/learn/statistics/survival-ipcw/figs/surv-plot-1.svg deleted file mode 100644 index a083b00b..00000000 --- a/content/learn/statistics/survival-ipcw/figs/surv-plot-1.svg +++ /dev/null @@ -1,84 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -0.00 -0.25 -0.50 -0.75 -1.00 - - - - - - - - - -0 -5 -10 -15 -time -survival probability - - diff --git a/content/learn/statistics/survival-ipcw/figs/usable-data-1.svg b/content/learn/statistics/survival-ipcw/figs/usable-data-1.svg index 32570a00..e0e78504 100644 --- 
a/content/learn/statistics/survival-ipcw/figs/usable-data-1.svg +++ b/content/learn/statistics/survival-ipcw/figs/usable-data-1.svg @@ -35,10 +35,10 @@ - - - - + + + + @@ -46,19 +46,20 @@ - - - - + + + + + 0 -50 -100 -150 -200 -250 +100 +200 +300 +400 +500 @@ -66,14 +67,16 @@ - - - + + + + 0 -5 -10 -15 +5 +10 +15 +20 time -Number of Usable Samples +number of usable observations diff --git a/content/learn/statistics/survival-ipcw/index.Rmarkdown b/content/learn/statistics/survival-ipcw/index.Rmarkdown index e6c7e7d9..11b24185 100644 --- a/content/learn/statistics/survival-ipcw/index.Rmarkdown +++ b/content/learn/statistics/survival-ipcw/index.Rmarkdown @@ -1,5 +1,5 @@ --- -title: "Understanding Inverse Probability of Censoring Weights (IPCW)" +title: "Accounting for Censoring in Performance Metrics for Event Time Data" tags: [survival,censored,parsnip] categories: [statistical analysis] type: learn-subsection @@ -24,24 +24,28 @@ options(pillar.advice = FALSE, pillar.min_title_chars = Inf) `r req_pkgs(pkgs)` You'll need the development versions of censored and parsnip. To install these, use ```r -#install.packages(pak) +#install.packages("pak") pak::pak(c("tidymodels/censored", "tidymodels/parsnip")) ``` ## Introduction - One trend in modern survival analysis is to compute time-dependent measures of performance. These are primarily driven by an increased focus on predictions for the probability of survival at a given time (as opposed to the predictions of event times or linear predictors). Since these are conditional on the time of evaluation, we call them dynamic performance metrics. -Many dynamic metrics are similar to, if not the same, as those used in binary classification models. Examples include the Brier score and ROC curves (that's [another article](FIXME) though). In other words, for a given time `t` for model evaluation, we try to encode the observed event time data into a binary "was there an event at time `t`?" version. 
We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50). +Many dynamic metrics are similar to those used for binary classification models. Examples include the Brier score and ROC curves (see the [Dynamic Performance Metrics for Event Time Data](FIXME) article for details). The basic idea is that, for a given time $t$ for model evaluation, we try to encode the observed event time data into a binary "has there been an event at time $t$?" version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50). The survival versions of these metrics need those binary versions of observed truth and predictions as well as a way to account for censoring. + +Censoring plays into the details of the conversion and is additionally captured in the form of weights. This article covers both those aspects in detail to complement the [main article](FIXME) on performance metrics for event time data. -The sections below discuss the practical aspects of moving to a binary outcome and the statistical implications. To start, let's define the various types of times that will be mentioned: +To start, let's define the various types of times that will be mentioned: - Observed time: time recorded in the data - Event time: observed times for actual events - Evaluation time: the time, specified by the analyst, that the model is evaluated. + +## Example data + As an example, we'll simulate some data with the prodlim package, using the methods of [Bender _et al_ (2005)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Generating+survival+times+to+simulate+Cox+proportional+hazards+models.%22&btnG=). A training and a validation set are simulated. 
We'll also load the censored package so that we can fit a model to these time-to-event data: ```{r} @@ -52,7 +56,7 @@ library(censored) library(prodlim) set.seed(5882) -sim_dat <- SimSurv(1000) %>% +sim_dat <- SimSurv(2000) %>% mutate(event_time = Surv(time, event)) %>% select(event_time, X1, X2) @@ -66,6 +70,7 @@ We'll need a model to illustrate the code and concepts. Let's fit a bagged survi ```{r} #| label: bag-tree-fit + set.seed(17) bag_tree_fit <- bag_tree() %>% @@ -75,30 +80,18 @@ bag_tree_fit <- bag_tree_fit ``` -Using this model, we can make predictions of different types. - -## Survival Probability Prediction - -This censored regression model can make static predictions via the predicted event time using `predict(object, type = "time")`. It can also create dynamic predictions regarding the probability of survival for each data point at specific times. The syntax for this is - -```r -predict(object, new_data, type = "survival", eval_time = numeric()) -``` - -where `eval_time` is a vector of time points at which we want the corresponding survivor function estimates. Alternatively, we can use the `augment()` function to get both types of prediction and automatically attach them to the data. - -The largest event time in the training set is `r round(max(sim_tr$event_time[,1]), 3)` so we will use a set of evaluation times between zero and 18. +Using this model, we can make predictions of different types and `augment()` provides us with a version of the data augmented with the various predictions. Here we are interested in the predicted probability of survival at different evaluation time points. The largest event time in the training set is `r round(max(sim_tr$event_time[,1]), 3)` so we will use a set of evaluation times between zero and 21. 
```{r} #| label: val-pred -time_points <- seq(0, 18, by = 0.25) +time_points <- seq(0, 21, by = 0.25) val_pred <- augment(bag_tree_fit, sim_val, eval_time = time_points) val_pred ``` -The `.pred` column contains the dynamic predictions in a list column. Since `length(time_points)` is `r length(time_points)`, each data frame in the list column has that many rows: +The observed data are in the `event_time` column. The predicted survival probabilities are in the `.pred` column. This is a list column with a data frame for each observation, containing the predictions at the `r length(time_points)` evaluation time points in the (nested) column `.pred_survival`. ```{r} #| label: val-pred-dynamic @@ -106,50 +99,83 @@ The `.pred` column contains the dynamic predictions in a list column. Since `len val_pred$.pred[[1]] ``` -The `.pred_survival` column has the predictions. We'll see what the other columns represent in the following few sections. - -Let's plot the estimated survivor curves for the first 10 data points in the validation set: - -```{r} -#| label: surv-plot - -# Unnest the list columns: -dyn_val_pred <- - val_pred %>% - select(.pred, event_time) %>% - add_rowindex() %>% - unnest(.pred) +First, let's dive into how to convert the observed event time in `event_time` to a binary version. Then we will discuss the remaining columns as part of generating the required weights for the dynamic performance metrics. -dyn_val_pred %>% - filter(.row <= 10) %>% - ggplot(aes(.eval_time, .pred_survival, group = .row, col = factor(.row))) + - geom_step(direction = "vh", linewidth = 1, alpha = 1 / 2, show.legend = FALSE) + - labs(x = "time", y = "survival probability") -``` ## Converting censored data to binary data -To assess model performance at evaluation time `t`, we turn the observed event time data into a binary “was there an event at time t?” version. 
For this, we follow the process described by [Graf _et al_ (1999)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Assessment+and+Comparison+of+Prognostic+Classification+Schemes+for+Survival+Data.%22&btnG=) where observations at evaluation time `t` are categorized into three groups. +To assess model performance at evaluation time $t$, we turn the observed event time data into a binary “was there an event at time $t$?” version. For this, we follow the process described by [Graf _et al_ (1999)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Assessment+and+Comparison+of+Prognostic+Classification+Schemes+for+Survival+Data.%22&btnG=) where observations are categorized into three groups, at evaluation time $t$. - - **Definitive non-events**: event times are less than the evaluation time ("it has already happened") - - **Definitive events**: Observed times (censored or not) are greater than the evaluation time ("it happens sometime after now"). - - **Ambiguous outcomes**: Observed censored time is less than the evaluation time ("maybe it happens, maybe not"). + - **Category 1 - Events**: Evaluation time is greater than or equal to the event time ("it has already happened"). + - **Category 2 - Non-events**: Evaluation time is less than the observed time, censored or not ("nothing has happened yet"). + - **Category 3 - Ambiguous outcomes**: Evaluation time is greater than or equal to the observed censored time ("we don't know if anything might have happened by now"). -We can use the first two sets of data to compute binary performance metrics, but the third set cannot be used. Note that the size of the third group is likely to grow as the evaluation time increases. This means we'll have fewer data points to evaluate with late evaluation times. This implies that the variation in the metrics will be considerable as evaluation time goes on. 
For our simulated training set: +We can use the first two categories of data to compute binary performance metrics, but the third category cannot be used. So our usable sample size changes with the evaluation time. ```{r} -#| label: usable-data -dyn_val_pred %>% - summarize(num_usable = sum(!is.na(.weight_censored)), .by = c(.eval_time)) %>% - ggplot() + - geom_step(aes(.eval_time, num_usable)) + - labs(x = "time", y = "number of usable samples") + - lims(y = c(0, nrow(sim_val))) +#| label: plot-graf-categories +#| echo: false +#| warning: false +#| fig.width: 9 +obs_time <- c(4, 2) +obs_status <- c("censored", "event") + +df1 <- tibble::tibble( + obs_id = 1:2, + obs_time = obs_time, + obs_status = obs_status, + eval_time = 1, + eval_status = c("censored", "censored") +) +df2 <- tibble::tibble( + obs_id = 1:2, + obs_time = obs_time, + obs_status = obs_status, + eval_time = 3, + eval_status = c("censored", "event") +) +df3 <- tibble::tibble( + obs_id = 1:2, + obs_time = obs_time, + obs_status = obs_status, + eval_time = 5, + eval_status = c(NA, "event") +) +df <- bind_rows(df1, df2, df3) + +pch_dot_empty <- 1 +pch_dot_solid <- 19 +pch_triangle_empty <- 2 +pch_triangle_solid <- 17 + +df %>% + dplyr::mutate( + obs_status = dplyr::if_else(obs_status == "censored", pch_dot_empty, pch_dot_solid), + eval_status = dplyr::if_else(eval_status == "censored", pch_triangle_empty, pch_triangle_solid) + ) %>% + ggplot() + + geom_point(aes(obs_time, obs_id, shape = obs_status, size = I(5))) + + geom_segment(aes(x = rep(0, 6), y = obs_id, xend = obs_time, yend = obs_id)) + + geom_vline(aes(xintercept = eval_time, col = I("red"), linetype = I("dashed"), linewidth = I(0.8))) + + geom_point(aes(eval_time, obs_id, shape = eval_status, col = I("red"), size = I(5))) + + scale_shape_identity("Status", + labels = c("Observation: censored", "Observation: event", + "Evaluation: non-event", "Evaluation: event"), + breaks = c(1, 19, 2, 17), + guide = "legend") + + scale_x_continuous(limits = 
c(0, 7)) + + scale_y_continuous(limits = c(0.5, 2.5)) + + labs(x = "Time", y = "Sample") + + theme_bw() + + theme(axis.text.y = element_blank(), legend.position = "top") + + facet_grid(~ eval_time) ``` -Unfortunately, the categorization scheme shown above is not sufficient to compute metrics. Graf _et al_ took a page from the causal inference literature and added a propensity-type score based on the likelihood that each data point would be censored (regardless of the observed event status). +## Censoring weights + +Unfortunately, this categorization scheme alone is not sufficient to compute metrics. Graf _et al_ took a page from the causal inference literature and added a propensity-type score based on the likelihood that each data point would be censored (regardless of the observed event status). This is not the probability than the original time-to-event data point is censored but rather the probability that at evaluation time, we have not observed an event (or a censoring) yet, i.e., that the data point falls into category 2. -How do we compute this probability? The standard approach is to compute a "reverse Kaplan-Meier" (RKM) curve. Ordinarily, the Kaplan-Meier (KM) curve models the probability of an event (e.g., survival). The reverse curve models the probability of censoring (e.g., modeling non-events). This should give us a reasonably reliable non-parametric model for estimating the probability of being censored at a given time. +How do we compute this probability? The standard approach is to compute a "reverse Kaplan-Meier" (RKM) curve. Ordinarily, the Kaplan-Meier (KM) curve models the probability of survival. For the reverse Kaplan-Meier curve, the meaning of the status indicator is flipped, i.e., the event of interest changes from "death" to "censoring". This should give us a reasonably reliable non-parametric model for estimating the probability of being censored at a given time. 
Every time a censored regression model is created using tidymodels, the RKM is estimated on the same data used to fit the model and attached to the parsnip object. @@ -159,6 +185,12 @@ For our simulated data, here is what the RKM curve looks like: #| label: RKM #| echo : false +dyn_val_pred <- + val_pred %>% + select(.pred, event_time) %>% + add_rowindex() %>% + unnest(.pred) + dyn_val_pred %>% select(.weight_time, .pred_censored) %>% distinct() %>% @@ -178,32 +210,46 @@ dyn_val_pred %>% labs(x = "time", y = "probability of being censored") ``` -The red rug on the bottom shows the training point event times and the blue values at the top represent the times for the censored training set observations. +The red rug on the bottom shows the training point event times and the blue values at the top represent the times for the censored training set observations. As (evaluation) time increases, we pass more and more observed time points, and the probability of being censored, i.e., the probability of an observation to fall into category 2, decreases. -For any dynamic computations, we multiply the case's contributions by the inverse probability of being censored. This is called the inverse probability of censoring weights (IPCW). This should theoretically balance the exposure/effect/influence that the definitive observations have on performance calculations. +The weights used in the calculation of the dynamic performance metrics are the inverse of these probabilities, hence the name "inverse probability of censoring weights" (IPCW). These weights should theoretically balance the exposure/effect/influence that the definitive observations have on performance calculations. -### Some computational details +### The finer details -First, when do we evaluate the probability of censoring? There are different approaches used in the literature, and we follow what Graf _et al_ suggest (as it seems most appropriate): +First, when do we evaluate the probability of censoring? 
There are different approaches used in the literature, and we follow what Graf _et al_ suggest (as it seems most appropriate): - - If the observed time corresponds to an actual event, and that time is before the evaluation time, the probability of being censored is predicted at the observed time. - - If the observed time is after the evaluation time, regardless of the status, the probability of being censored is predicted at the evaluation time. +- If the evaluation time is less than the observed time (like in category 2), the evaluation time is used to predict the probability of censoring. +- If the evaluation is greater than or equal to the event time (like in category 1), the event time is used to predict the probability of censoring. +- If the evaluation time is greater than or equal to the observed censoring time, the observation falls into category 3 and is not used, i.e., also no weight is needed. -Here's an example using the first data point in the validation set: +Graf _et al_ call this time at which to predict the probability of censoring the _weight time_. Here's an example using the first data point in the validation set: ```{r} #| label: eval-time-censored + +dyn_val_pred <- + val_pred %>% + select(.pred, event_time) %>% + add_rowindex() %>% + unnest(.pred) + dyn_val_pred %>% - filter(.row == 1 & .eval_time %in% c(1, 5, 5.75, 10, 15)) %>% + filter(.row == 1 & .eval_time %in% c(1, 2, 4, 5, 10)) %>% select(event_time, .eval_time, .weight_time, .pred_censored, .weight_censored) ``` -This observation was an event, observed at time `r round(parsnip:::.extract_surv_time(sim_val$event_time[1]), 3)`. The column `.weight_time` captures at which time the probability of censoring was calculated. Up until that evaluation time, the probability of being censored is computed at the evaluation time. After that, it is based on the event time. +This observation was an event, observed at time `r round(parsnip:::.extract_surv_time(sim_val$event_time[1]), 3)`. 
The column `.weight_time` captures at which time the probability of censoring was calculated. Up until that event time, the probability of being censored is computed at the evaluation time. After that, it is based on the event time. -We also slightly modify the time that the censoring probability is computed. If our evaluation time is today, we don't have today's data yet. In tidymodels, we calculate the probability of censoring just before the requested evaluation time. We are basically subtracting a small numerical value from the evaluation time used in the RKM model. You'll only really see a difference if there is a bulk of censored observations at the original evaluation time. +We add a slight modification to the weight time: If our evaluation time is today, we don't have today's data yet. In tidymodels, we calculate the probability of censoring just before the requested weight time. We are basically subtracting a small numerical value from the weight time used in the RKM model. You'll only really see a difference if there is a bulk of censored observations at the original evaluation time. Finally, we use a simple RKM curve (i.e., no covariates or strata). This implies that there is non-informative censoring. Other applications of IPCW try to mitigate the effects of informative censoring. In the future, we may allow the censoring model to include covariates (as well as models beyond the RKM). +## Illustration: Confusion matrix + +To illustrate how these two tools for accounting for censoring are used in calculating dynamic performance metrics, we'll take a look here at the 2x2 confusion matrices at a few evaluation time points. More details on performance metrics for censored data are in the aforementioned [Dynamic Performance Metrics for Event Time Data](FIXME) article. + +First, let's turn the observed event time data and the predictions into their binary versions. 
+ ```{r} # TODO I'm going to add this to parsnip time_as_binary_event <- function(surv, eval_time) { @@ -232,9 +278,19 @@ binary_encoding <- ) ``` -To illustrate how the IPCW are used in calculating dynamic performance metrics, we'll take a look here at the 2x2 confusion matrices at a few time points. More details on performance metrics for censored data are in a separate [tidymodels.org article](FIXME). +Remember how observations falling into category 3 are removed from the analysis? This means we'll likely have fewer data points to evaluate as the evaluation time increases. This implies that the variation in the metrics will be considerable as evaluation time goes on. For our simulated training set: + +```{r} +#| label: usable-data +dyn_val_pred %>% + summarize(num_usable = sum(!is.na(.weight_censored)), .by = c(.eval_time)) %>% + ggplot() + + geom_step(aes(.eval_time, num_usable)) + + labs(x = "time", y = "number of usable observations") + + lims(y = c(0, nrow(sim_val))) +``` -Let's start with an evaluation time of 1.00. Ordinarily, we would simply use: +Keeping this in mind, let's look at what happens with the data points we can use. Let's start with an evaluation time of 1.00. To compute the confusion matrix for a classification problem, we would simply use: ```r binary_encoding %>% @@ -242,7 +298,7 @@ binary_encoding %>% conf_mat(truth = obs_class, estimate = pred_class) ``` -to compute the metric. 
This would ignore the censoring weights so we'll include them via the `case_weights` argument: +For censored regression problems, we need to additionally use the censoring weights so we'll include them via the `case_weights` argument: ```{r} #| label: conf-mat-01 @@ -261,7 +317,7 @@ dat_01 <- binary_encoding %>% filter(.eval_time == 1.00 & !is.na(.weight_censore events_01 <- nrow(dat_01 %>% filter(event_time[,1] <= 1)) ``` -The values in the cells are the sum of the censoring weights, There are `r events_01` actual events (out of `r nrow(dat_01)` usable values) before this evaluation time, so there are empty cells. Also note that the cell values are close to the actual counts. This early, the predicted censoring probabilities are very close to one so there inverse values are also. +The values in the cells are the sum of the censoring weights, There are `r events_01` actual events (out of `r nrow(dat_01)` usable observations) before this evaluation time, so there are empty cells. Also note that the cell values are close to the actual counts. This early, the predicted censoring probabilities are very close to one so their inverse values are also. This early, performance looks very good but that is mostly because there are few events. @@ -318,12 +374,13 @@ stats_17 <- The data are overwhelmingly events. Also, the censoring weights are larger now since the probability of being censored is very low. The mean censoring weight is `r round(mean(dat_17$.weight_censored), 2)`. -There's more on dynamic performance metrics in [another article](). +This concludes the illustration of how to account for censoring when using a confusion matrix for performance assessment. There's more on dynamic performance metrics in the [Dynamic Performance Metrics for Event Time Data](FIXME) article. 
## Summary -When converting censored event time data to a binary format, the main points to remember are: +When accounting for censoring in dynamic performance metrics, the main points to remember are: +* Event time data can be converted to a binary format. * Some data points cannot be used in the calculations. * To properly estimate statistical quantities, we weight the computations by the inverse of the probability of being censored. * tidymodels currently assumes non-informative censoring. diff --git a/content/learn/statistics/survival-ipcw/index.markdown b/content/learn/statistics/survival-ipcw/index.markdown index 6c5d257b..b10b360c 100644 --- a/content/learn/statistics/survival-ipcw/index.markdown +++ b/content/learn/statistics/survival-ipcw/index.markdown @@ -1,5 +1,5 @@ --- -title: "Understanding Inverse Probability of Censoring Weights (IPCW)" +title: "Accounting for Censoring in Performance Metrics for Event Time Data" tags: [survival,censored,parsnip] categories: [statistical analysis] type: learn-subsection @@ -16,25 +16,29 @@ description: | To use the code in this article, you will need to install the following packages: censored, prodlim, and tidymodels. You'll need the development versions of censored and parsnip. To install these, use ```r -library(pak) +#install.packages("pak") pak::pak(c("tidymodels/censored", "tidymodels/parsnip")) ``` ## Introduction +One trend in modern survival analysis is to compute time-dependent measures of performance. These are primarily driven by an increased focus on predictions for the probability of survival at a given time (as opposed to the predictions of event times or linear predictors). Since these are conditional on the time of evaluation, we call them dynamic performance metrics. -One trend in modern survival analysis is to compute time-dependent measures of performance. 
These are primarily driven by an increased focus on predictions for the probability of survival at a given time (as opposed to the predictions of event times or linear predictors). Since these are conditional on the time of evaluation, we call the dynamic performance metrics. +Many dynamic metrics are similar to those used for binary classification models. Examples include the Brier score and ROC curves (see the [Dynamic Performance Metrics for Event Time Data](FIXME) article for details). The basic idea is that, for a given time `\(t\)` for model evaluation, we try to encode the observed event time data into a binary "has there been an event at time `\(t\)`?" version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50). The survival versions of these metrics need those binary versions of observed truth and predictions as well as a way to account for censoring. -Many dynamic metrics are similar to, if not the same, as those used in binary classification models. Examples include the Brier score and ROC curves (that's [another article]() though). In other words, at a given time for model evaluation, we try to encode the observed event time data into a binary "was there an event at time `t`?" version. We also convert the predicted survival probabilities into predicted events/non-events based on a threshold (default is 0.50). +Censoring plays into the details of the conversion and is additionally captured in the form of weights. This article covers both those aspects in detail to complement the [main article](FIXME) on performance metrics for event time data. -The section below discusses the practical aspects of moving to a binary outcome and the statistical implications. 
Before we start, though, let's define the various types of times that will be mentioned: +To start, let's define the various types of times that will be mentioned: - Observed time: time recorded in the data - Event time: observed times for actual events - Evaluation time: the time, specified by the analyst, that the model is evaluated. -For example, we'll simulate some data using the methods of [Bender _et al_ (2005)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Generating+survival+times+to+simulate+Cox+proportional+hazards+models.%22&btnG=) using the prodlim package. A training and validation sets are simulated. We'll also load the censored package so that we can fit a model to these time-to-event data: + +## Example data + +As an example, we'll simulate some data with the prodlim package, using the methods of [Bender _et al_ (2005)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Generating+survival+times+to+simulate+Cox+proportional+hazards+models.%22&btnG=). A training and a validation set are simulated. We'll also load the censored package so that we can fit a model to these time-to-event data: ```r @@ -44,7 +48,7 @@ library(censored) library(prodlim) set.seed(5882) -sim_dat <- SimSurv(1000) %>% +sim_dat <- SimSurv(2000) %>% mutate(event_time = Surv(time, event)) %>% select(event_time, X1, X2) @@ -73,110 +77,71 @@ bag_tree_fit #> Call: bagging.data.frame(formula = event_time ~ ., data = data) ``` -Using this model, we'll can make predictions of different types. +Using this model, we can make predictions of different types and `augment()` provides us with a version of the data augmented with the various predictions. Here we are interested in the predicted probability of survival at different evaluation time points. The largest event time in the training set is 21.083 so we will use a set of evaluation times between zero and 21. 
-## Survival Probability Prediction - -This censored regression model can make static predictions via the predicted event time using `predict(object, type = "time")`. It can also create dynamic predictions regarding the probability of survival for each data point at specific times. The syntax for this is ```r -predict(object, new_data, type = "survival", eval_time = numeric()) -``` - -where `eval_time` is a vector of time points at which we want the corresponding survivor function estimates. Alternatively, we can use the `augment()` function to get both types of prediction and automatically attach them to the data. - -The largest event time in the training set is 18.1 so we will use a set of evaluation times between zero and 18. From there, we use `augment()`: - - -```r -time_points <- seq(0, 18, by = 0.25) +time_points <- seq(0, 21, by = 0.25) val_pred <- augment(bag_tree_fit, sim_val, eval_time = time_points) val_pred -#> # A tibble: 250 × 5 +#> # A tibble: 500 × 5 #> .pred .pred_time event_time X1 X2 #> -#> 1 9.48 5.78 1 -0.630 -#> 2 6.67 3.26+ 0 0.792 -#> 3 3.82 2.34 1 0.811 -#> 4 8.53 7.45+ 1 -0.271 -#> 5 7.07 8.05 0 0.315 -#> 6 7.24 14.09 0 0.264 -#> 7 10.6 5.23+ 0 -0.532 -#> 8 12.3 3.18+ 0 -1.41 -#> 9 11.0 1.86 1 -0.851 -#> 10 6.03 7.20 1 -0.0937 -#> # ℹ 240 more rows +#> 1 6.66 4.83 1 -0.630 +#> 2 6.66 6.11 1 -0.606 +#> 3 7.47 6.60+ 1 -1.03 +#> 4 3.29 2.72 1 0.811 +#> 5 5.10 4.73+ 1 -0.376 +#> 6 4.99 8.70 0 1.18 +#> 7 7.23 10.82 1 -0.851 +#> 8 6.46 6.89 0 0.493 +#> 9 4.75 2.45+ 1 0.0207 +#> 10 13.4 8.23+ 0 -1.52 +#> # ℹ 490 more rows ``` -The `.pred` column contains the dynamic predictions in a list column. Since `length(time_points)` is 73, each data frame in the list column has that many rows: +The observed data are in the `event_time` column. The predicted survival probabilities are in the `.pred` column. 
This is a list column with a data frame for each observation, containing the predictions at the 85 evaluation time points in the (nested) column `.pred_survival`. ```r val_pred$.pred[[1]] -#> # A tibble: 73 × 5 +#> # A tibble: 85 × 5 #> .eval_time .pred_survival .weight_time .pred_censored .weight_censored #> #> 1 0 1 0 1 1 -#> 2 0.25 0.996 0.250 1 1 -#> 3 0.5 0.993 0.500 0.997 1.00 -#> 4 0.75 0.993 0.750 0.995 1.01 -#> 5 1 0.989 1.00 0.992 1.01 -#> 6 1.25 0.984 1.25 0.985 1.02 -#> 7 1.5 0.975 1.50 0.978 1.02 -#> 8 1.75 0.974 1.75 0.969 1.03 -#> 9 2 0.964 2.00 0.956 1.05 -#> 10 2.25 0.949 2.25 0.944 1.06 -#> # ℹ 63 more rows +#> 2 0.25 1 0.250 0.999 1.00 +#> 3 0.5 0.999 0.500 0.996 1.00 +#> 4 0.75 0.992 0.750 0.993 1.01 +#> 5 1 0.988 1.00 0.991 1.01 +#> 6 1.25 0.980 1.25 0.987 1.01 +#> 7 1.5 0.972 1.50 0.981 1.02 +#> 8 1.75 0.959 1.75 0.971 1.03 +#> 9 2 0.938 2.00 0.966 1.04 +#> 10 2.25 0.925 2.25 0.959 1.04 +#> # ℹ 75 more rows ``` -The `.pred_survival` column has the predictions. We'll see what the other columns represent in the following few sections. - -Let's plot the estimated survivor curves for the first 10 data points in the validation set: - - -```r -# Unnest the list columns: -dyn_val_pred <- - val_pred %>% - select(.pred, event_time) %>% - add_rowindex() %>% - unnest(.pred) - -dyn_val_pred %>% - filter(.row <= 10) %>% - ggplot(aes(.eval_time, .pred_survival, group = .row, col = factor(.row))) + - geom_step(direction = "vh", linewidth = 1, alpha = 1 / 2, show.legend = FALSE) + - labs(x = "time", y = "survival probability") -``` +First, let's dive into how to convert the observed event time in `event_time` to a binary version. Then we will discuss the remaining columns as part of generating the required weights for the dynamic performance metrics. 
- ## Converting censored data to binary data -We follow the process described by [Graf _et al_ (1999)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Assessment+and+Comparison+of+Prognostic+Classification+Schemes+for+Survival+Data.%22&btnG=) where observations at evaluation time `t` are categorized into three groups. +To assess model performance at evaluation time `\(t\)`, we turn the observed event time data into a binary “was there an event at time `\(t\)`?” version. For this, we follow the process described by [Graf _et al_ (1999)](https://scholar.google.com/scholar?hl=en&as_sdt=0%2C7&q=%22Assessment+and+Comparison+of+Prognostic+Classification+Schemes+for+Survival+Data.%22&btnG=) where observations are categorized into three groups, at evaluation time `\(t\)`. - - **Definitive non-events**: event times are less than the evaluation time ("it hasn't happened yet") - - **Definitive events**: Observed times (censored or not) are greater than the evaluation time ("it happens sometime after now"). - - **Ambiguous outcomes**: Observed censored time is less than the evaluation time ("maybe it happens, maybe not"). + - **Category 1 - Events**: Evaluation time is greater than or equal to the event time ("it has already happened"). + - **Category 2 - Non-events**: Evaluation time is less than the observed time, censored or not ("nothing has happened yet"). + - **Category 3 - Ambiguous outcomes**: Evaluation time is greater than or equal to the observed censored time ("we don't know if anything might have happened by now"). -We can use the first two sets of data to compute binary performance metrics, but the third set cannot be used. Note that the size of the third group is likely to grow as the evaluation time increases. This means we'll have fewer data points to evaluate with late evaluation times. This implies that the variation in the metrics will be considerable as time goes on. 
For our simulated training set: +We can use the first two categories of data to compute binary performance metrics, but the third category cannot be used. So our usable sample size changes with the evaluation time. + -```r -dyn_val_pred %>% - summarize(num_usable = sum(!is.na(.weight_censored)), .by = c(.eval_time)) %>% - ggplot() + - geom_step(aes(.eval_time, num_usable)) + - labs(x = "time", y = "Number of Usable Samples") + - lims(y = c(0, nrow(sim_val))) -``` - - +## Censoring weights -Unfortunately, the categorization scheme shown above is not sufficient to compute metrics. Graf took a page from the causal inference literature and added a propensity-type score based on the likelihood that each data point would be censored (regardless of the observed event status). +Unfortunately, this categorization scheme alone is not sufficient to compute metrics. Graf _et al_ took a page from the causal inference literature and added a propensity-type score based on the likelihood that each data point would be censored (regardless of the observed event status). This is not the probability that the original time-to-event data point is censored but rather the probability that at evaluation time, we have not observed an event (or a censoring) yet, i.e., that the data point falls into category 2. -How do we compute this probability? The standard approach is to compute a "reverse Kaplan-Meier" (RKM) curve. Ordinarily, the Kaplan-Meier (KM) curve models the probability of an event (e.g., survival). The reverse curve models the probability of censoring (e.g., modeling non-events). This should give us a reasonably reliable non-parametric model for estimating the probability of being censored at a given time. +How do we compute this probability? The standard approach is to compute a "reverse Kaplan-Meier" (RKM) curve. Ordinarily, the Kaplan-Meier (KM) curve models the probability of survival. 
For the reverse Kaplan-Meier curve, the meaning of the status indicator is flipped, i.e., the event of interest changes from "death" to "censoring". This should give us a reasonably reliable non-parametric model for estimating the probability of being censored at a given time. Every time a censored regression model is created using tidymodels, the RKM is estimated on the same data used to fit the model and attached to the parsnip object. @@ -184,39 +149,52 @@ For our simulated data, here is what the RKM curve looks like: -The red rug on the bottom shows the training point event times and the blue values at the top represent the times for the censored training set observations. +The red rug on the bottom shows the training point event times and the blue values at the top represent the times for the censored training set observations. As (evaluation) time increases, we pass more and more observed time points, and the probability of being censored, i.e., the probability of an observation to fall into category 2, decreases. -For any dynamic computations, we multiply the case's contributions by the inverse probability of being censored. This is called the inverse probability of censoring weights (IPCW). This should theoretically balance the exposure/effect/influence that the definitive observations have on performance calculations. +The weights used in the calculation of the dynamic performance metrics are the inverse of these probabilities, hence the name "inverse probability of censoring weights" (IPCW). These weights should theoretically balance the exposure/effect/influence that the definitive observations have on performance calculations. -### Some computational details +### The finer details -First, when do we evaluate the probability of censoring? There are different approaches used in the literature, and we follow what Graf suggests (as it seems most appropriate): +First, when do we evaluate the probability of censoring? 
There are different approaches used in the literature, and we follow what Graf _et al_ suggest (as it seems most appropriate): - - If the observed time corresponds to an actual event, and that time is before the evaluation time, the probability of being censored is predicted at the observed time. - - If the observed time is after the evaluation time, regardless of the status, the probability of being censored is predicted at the evaluation time. +- If the evaluation time is less than the observed time (like in category 2), the evaluation time is used to predict the probability of censoring. +- If the evaluation time is greater than or equal to the event time (like in category 1), the event time is used to predict the probability of censoring. +- If the evaluation time is greater than or equal to the observed censoring time, the observation falls into category 3 and is not used, i.e., also no weight is needed. -Here's an example using the first data point in the validation set: +Graf _et al_ call this time at which to predict the probability of censoring the _weight time_. Here's an example using the first data point in the validation set: ```r +dyn_val_pred <- + val_pred %>% + select(.pred, event_time) %>% + add_rowindex() %>% + unnest(.pred) + dyn_val_pred %>% - filter(.row == 1 & .eval_time %in% c(1, 5, 5.75, 10, 15)) %>% + filter(.row == 1 & .eval_time %in% c(1, 2, 4, 5, 10)) %>% select(event_time, .eval_time, .weight_time, .pred_censored, .weight_censored) #> # A tibble: 5 × 5 #> event_time .eval_time .weight_time .pred_censored .weight_censored #> -#> 1 5.78 1 1.00 0.992 1.01 -#> 2 5.78 5 5.00 0.779 1.28 -#> 3 5.78 5.75 5.75 0.714 1.40 -#> 4 5.78 10 5.78 0.710 1.41 -#> 5 5.78 15 5.78 0.710 1.41 +#> 1 4.83 1 1.00 0.991 1.01 +#> 2 4.83 2 2.00 0.966 1.04 +#> 3 4.83 4 4.00 0.848 1.18 +#> 4 4.83 5 4.83 0.786 1.27 +#> 5 4.83 10 4.83 0.786 1.27 ``` -The observed event time was 5.779. 
Up until that evaluation time, the probability of being censored is computed at the evaluation time. After that, it is based on the event time. +This observation was an event, observed at time 4.832. The column `.weight_time` captures at which time the probability of censoring was calculated. Up until that event time, the probability of being censored is computed at the evaluation time. After that, it is based on the event time. + +We add a slight modification to the weight time: If our evaluation time is today, we don't have today's data yet. In tidymodels, we calculate the probability of censoring just before the requested weight time. We are basically subtracting a small numerical value from the weight time used in the RKM model. You'll only really see a difference if there is a bulk of censored observations at the original evaluation time. + +Finally, we use a simple RKM curve (i.e., no covariates or strata). This implies that there is non-informative censoring. Other applications of IPCW try to mitigate the effects of informative censoring. In the future, we may allow the censoring model to include covariates (as well as models beyond the RKM). + +## Illustration: Confusion matrix -We also slightly modify the time that the censoring probability is computed. If our evaluation time is today, we don't have today's data yet. In tidymodels, we calculate the probability of censoring is computed just before the requested evaluation time. We are basically subtracting a small numerical value from the evaluation time used in the RKM model. You'll only really see a difference if there is a bulk of censored observations at the original evaluation time. +To illustrate how these two tools for accounting for censoring are used in calculating dynamic performance metrics, we'll take a look here at the 2x2 confusion matrices at a few evaluation time points. 
More details on performance metrics for censored data are in the aforementioned [Dynamic Performance Metrics for Event Time Data](FIXME) article. -Finally, we use a simple RKM curve (i.e., no covariates or strata). This implies that there is non-informative censoring. Other applications of IPCW try to mitigate the effects of informative censoring. In the future, we may allow the model to include covariates (as well as models beyond the RKM). +First, let's turn the observed event time data and the predictions into their binary versions. ```r @@ -247,9 +225,21 @@ binary_encoding <- ) ``` -While another article delves into the details of performance metrics for censored data, we'll look at the 2x2 confusion matrices at a few time points. +Remember how observations falling into category 3 are removed from the analysis? This means we'll likely have fewer data points to evaluate as the evaluation time increases. This implies that the variation in the metrics will be considerable as evaluation time goes on. For our simulated training set: -Let's start with an evaluation time of 1.00. Ordinarily, we would simply use: + +```r +dyn_val_pred %>% + summarize(num_usable = sum(!is.na(.weight_censored)), .by = c(.eval_time)) %>% + ggplot() + + geom_step(aes(.eval_time, num_usable)) + + labs(x = "time", y = "number of usable observations") + + lims(y = c(0, nrow(sim_val))) +``` + + + +Keeping this in mind, let's look at what happens with the data points we can use. Let's start with an evaluation time of 1.00. To compute the confusion matrix for a classification problem, we would simply use: ```r binary_encoding %>% @@ -257,7 +247,7 @@ binary_encoding %>% conf_mat(truth = obs_class, estimate = pred_class) ``` -to compute the metric. 
This would ignore the censoring weights so we'll add the `case_weights` argument: +For censored regression problems, we need to additionally use the censoring weights so we'll include them via the `case_weights` argument: ```r @@ -267,17 +257,17 @@ binary_encoding %>% estimate = pred_class, case_weights = .weight_censored) #> Truth -#> Prediction event non-event -#> event 0.00 0.00 -#> non-event 6.03 244.99 +#> Prediction event non-event +#> event 0.0 0.0 +#> non-event 14.1 482.5 ``` -The values in the cells are the sum of the censoring weights, There are 6 actual events (out of 249 usable values) before this evaluation time, so there are empty cells. Also note that the cell values are close to the actual counts. This early, the predicted censoring probabilities are very close to one so there inverse values are also. +The values in the cells are the sum of the censoring weights. There are 14 actual events (out of 492 usable observations) before this evaluation time, so there are empty cells. Also note that the cell values are close to the actual counts. This early, the predicted censoring probabilities are very close to one so their inverse values are also. -This early, performance looks very good but that is mostly since there are few events and they are all predicted well (at least with the default 50% cutoff). +This early, performance looks very good but that is mostly because there are few events. Let's shift to an evaluation time of 5.0. @@ -290,13 +280,13 @@ binary_encoding %>% case_weights = .weight_censored) #> Truth #> Prediction event non-event -#> event 72.6 24.4 -#> non-event 30.7 119.4 +#> event 113.0 54.4 +#> non-event 56.1 252.4 ``` -Now we have fewer total observations to consider (205 instead of 249 usable values) and more events (93 this time). Performance is fairly good; the sensitivity is 70.3% and the specificty is 83%. +Now we have fewer total observations to consider (391 instead of 492 usable values) and more events (154 this time). 
Performance is fairly good; the sensitivity is 66.8% and the specificity is 82.3%. What happens when the evaluation time is 17? @@ -309,20 +299,21 @@ binary_encoding %>% case_weights = .weight_censored) #> Truth #> Prediction event non-event -#> event 245.4 24.7 -#> non-event 0.0 0.0 +#> event 430 123 +#> non-event 0 0 ``` -The data are overwhelmingly events. Also, the censoring weights are larger now since the probability of being censored is very low. The mean censoring weight is 1.81. +The data are overwhelmingly events. Also, the censoring weights are larger now since the probability of being censored is very low. The mean censoring weight is 1.96. -There's more on dynamic performance metrics in [another article](). +This concludes the illustration of how to account for censoring when using a confusion matrix for performance assessment. There's more on dynamic performance metrics in the [Dynamic Performance Metrics for Event Time Data](FIXME) article. ## Summary -When converting censored event time data to a binary format, the main point to remember are: +When accounting for censoring in dynamic performance metrics, the main points to remember are: +* Event time data can be converted to a binary format. * Some data points cannot be used in the calculations. * To properly estimate statistical quantities, we weight the computations by the inverse of the probability of being censored. * tidymodels currently assumes non-informative censoring. 
@@ -335,37 +326,38 @@ When converting censored event time data to a binary format, the main point to r #> ─ Session info ───────────────────────────────────────────────────── #> setting value #> version R version 4.2.0 (2022-04-22) -#> os macOS Big Sur/Monterey 10.16 -#> system x86_64, darwin17.0 +#> os macOS 13.4 +#> system aarch64, darwin20 #> ui X11 #> language (EN) #> collate en_US.UTF-8 #> ctype en_US.UTF-8 -#> tz America/New_York -#> date 2023-05-16 -#> pandoc 3.1.1 @ /Applications/RStudio.app/Contents/Resources/app/quarto/bin/tools/ (via rmarkdown) +#> tz Europe/London +#> date 2023-06-05 +#> pandoc 2.19.2 @ /Applications/RStudio.app/Contents/Resources/app/quarto/bin/tools/ (via rmarkdown) #> #> ─ Packages ───────────────────────────────────────────────────────── #> package * version date (UTC) lib source #> broom * 1.0.4 2023-03-11 [1] CRAN (R 4.2.0) -#> censored * 0.2.0.9000 2023-05-16 [1] Github (tidymodels/censored@f9eccb6) +#> censored * 0.2.0.9000 2023-05-22 [1] Github (tidymodels/censored@f9eccb6) #> dials * 1.2.0 2023-04-03 [1] CRAN (R 4.2.0) #> dplyr * 1.1.2 2023-04-20 [1] CRAN (R 4.2.0) #> ggplot2 * 3.4.2 2023-04-03 [1] CRAN (R 4.2.0) #> infer * 1.0.4 2022-12-02 [1] CRAN (R 4.2.0) -#> parsnip * 1.1.0.9001 2023-05-16 [1] Github (tidymodels/parsnip@ab42409) +#> parsnip * 1.1.0.9002 2023-06-05 [1] local #> prodlim * 2023.03.31 2023-04-02 [1] CRAN (R 4.2.0) #> purrr * 1.0.1 2023-01-10 [1] CRAN (R 4.2.0) -#> recipes * 1.0.6 2023-04-25 [1] CRAN (R 4.2.0) +#> recipes * 1.0.6.9000 2023-05-17 [1] local #> rlang 1.1.1 2023-04-28 [1] CRAN (R 4.2.0) -#> rsample * 1.1.1 2022-12-07 [1] CRAN (R 4.2.0) +#> rsample * 1.1.1.9000 2023-04-21 [1] local #> tibble * 3.2.1 2023-03-20 [1] CRAN (R 4.2.0) #> tidymodels * 1.1.0 2023-05-01 [1] CRAN (R 4.2.0) -#> tune * 1.1.1.9001 2023-05-16 [1] Github (tidymodels/tune@fdc0016) -#> workflows * 1.1.3 2023-02-22 [1] CRAN (R 4.2.0) +#> tune * 1.1.1.9000 2023-04-20 [1] local +#> workflows * 1.1.3.9000 2023-04-03 [1] local #> 
yardstick * 1.2.0 2023-04-21 [1] CRAN (R 4.2.0) #> -#> [1] /Library/Frameworks/R.framework/Versions/4.2/Resources/library +#> [1] /Users/hannah/Library/R/arm64/4.2/library +#> [2] /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library #> #> ──────────────────────────────────────────────────────────────────── ``` From 1a3f24bb757aa60d7fd99e06b14ef6ce8392a6d9 Mon Sep 17 00:00:00 2001 From: Hannah Frick Date: Fri, 9 Jun 2023 14:21:51 +0100 Subject: [PATCH 6/6] helpers are now exported not using `.time_as_binary_event()` since that requires a single value for eval_time --- content/learn/statistics/survival-ipcw/index.Rmarkdown | 5 ++--- content/learn/statistics/survival-ipcw/index.markdown | 9 ++++----- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/content/learn/statistics/survival-ipcw/index.Rmarkdown b/content/learn/statistics/survival-ipcw/index.Rmarkdown index 11b24185..5ebad214 100644 --- a/content/learn/statistics/survival-ipcw/index.Rmarkdown +++ b/content/learn/statistics/survival-ipcw/index.Rmarkdown @@ -251,10 +251,9 @@ To illustrate how these two tools for accounting for censoring are used in calcu First, let's turn the observed event time data and the predictions into their binary versions. 
```{r} -# TODO I'm going to add this to parsnip time_as_binary_event <- function(surv, eval_time) { - event_time <- parsnip:::.extract_surv_time(surv) # TODO we should export these - status <- parsnip:::.extract_surv_status(surv) + event_time <- .extract_surv_time(surv) + status <- .extract_surv_status(surv) is_event_before_t <- event_time <= eval_time & status == 1 # Three possible contributions to the statistic from Graf 1999 diff --git a/content/learn/statistics/survival-ipcw/index.markdown b/content/learn/statistics/survival-ipcw/index.markdown index b10b360c..93c84573 100644 --- a/content/learn/statistics/survival-ipcw/index.markdown +++ b/content/learn/statistics/survival-ipcw/index.markdown @@ -198,10 +198,9 @@ First, let's turn the observed event time data and the predictions into their bi ```r -# TODO I'm going to add this to parsnip time_as_binary_event <- function(surv, eval_time) { - event_time <- parsnip:::.extract_surv_time(surv) # TODO we should export these - status <- parsnip:::.extract_surv_status(surv) + event_time <- .extract_surv_time(surv) + status <- .extract_surv_status(surv) is_event_before_t <- event_time <= eval_time & status == 1 # Three possible contributions to the statistic from Graf 1999 @@ -333,7 +332,7 @@ When accounting for censoring in dynamic performance metrics, the main points to #> collate en_US.UTF-8 #> ctype en_US.UTF-8 #> tz Europe/London -#> date 2023-06-05 +#> date 2023-06-09 #> pandoc 2.19.2 @ /Applications/RStudio.app/Contents/Resources/app/quarto/bin/tools/ (via rmarkdown) #> #> ─ Packages ───────────────────────────────────────────────────────── @@ -344,7 +343,7 @@ When accounting for censoring in dynamic performance metrics, the main points to #> dplyr * 1.1.2 2023-04-20 [1] CRAN (R 4.2.0) #> ggplot2 * 3.4.2 2023-04-03 [1] CRAN (R 4.2.0) #> infer * 1.0.4 2022-12-02 [1] CRAN (R 4.2.0) -#> parsnip * 1.1.0.9002 2023-06-05 [1] local +#> parsnip * 1.1.0.9003 2023-06-08 [1] Github (tidymodels/parsnip@a85e508) #> prodlim * 
2023.03.31 2023-04-02 [1] CRAN (R 4.2.0) #> purrr * 1.0.1 2023-01-10 [1] CRAN (R 4.2.0) #> recipes * 1.0.6.9000 2023-05-17 [1] local