Skip to content

Commit d20f48d

Browse files
committed
Update tests
1 parent ecd7d63 commit d20f48d

5 files changed

Lines changed: 61 additions & 58 deletions

File tree

DESCRIPTION

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ Imports:
5050
ranger,
5151
pec
5252
Suggests:
53-
testthat,
53+
testthat (>= 3.0.0),
5454
covr,
5555
pmsampsize,
5656
rmarkdown,
@@ -62,3 +62,4 @@ LazyData: false
6262
Roxygen: list(markdown = TRUE)
6363
RoxygenNote: 7.3.3
6464
BuildVignettes: false
65+
Config/testthat/edition: 3

R/print.R

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -2,21 +2,21 @@
22
#' @export
33
print.pmsims <- function(x, ..., max_width = 80) {
44
if (!inherits(x, "pmsims")) stop("Object is not of class 'pmsims'")
5-
5+
66
`%||%` <- function(a, b) if (!is.null(a)) a else b
7-
7+
88
has_cli <- requireNamespace("cli", quietly = TRUE)
99
has_crayon <- requireNamespace("crayon", quietly = TRUE)
1010
has_tools <- requireNamespace("tools", quietly = TRUE)
11-
11+
1212
bold <- function(txt) if (has_crayon) crayon::bold(txt) else txt
1313
cyan <- function(txt) if (has_crayon) crayon::cyan(txt) else txt
1414
blue <- function(txt) if (has_crayon) crayon::blue(txt) else txt
1515
dimc <- function(txt) if (has_crayon) crayon::silver(txt) else txt
1616
italic <- function(txt) if (has_crayon) crayon::italic(txt) else txt
17-
17+
1818
scr_width <- min(getOption("width", 80L), as.integer(max_width))
19-
19+
2020
rule <- function(title = NULL) {
2121
if (has_cli) {
2222
if (is.null(title)) cli::cat_rule(width = scr_width)
@@ -26,10 +26,10 @@ print.pmsims <- function(x, ..., max_width = 80) {
2626
cat(strrep("-", scr_width), "\n", sep = "")
2727
}
2828
}
29-
29+
3030
is_present <- function(v) !is.null(v) && !(length(v) == 1L && is.na(v))
3131
plural <- function(n, unit) sprintf("%d %s", n, if (n == 1) unit else paste0(unit, "s"))
32-
32+
3333
fmt_duration <- function(simt) {
3434
secs <- if (inherits(simt, "difftime")) as.numeric(simt, units = "secs")
3535
else if (is.numeric(simt)) as.numeric(simt) else return("<NA>")
@@ -45,7 +45,7 @@ print.pmsims <- function(x, ..., max_width = 80) {
4545
if (s > 0 || length(parts) == 0) parts <- c(parts, plural(s, "second"))
4646
paste(parts, collapse = " ")
4747
}
48-
48+
4949
fmt_num <- function(v, digits = 3) {
5050
if (!is_present(v)) return("<NA>")
5151
if (is.numeric(v) && length(v) == 1L) formatC(v, format = "f", digits = digits)
@@ -55,7 +55,7 @@ print.pmsims <- function(x, ..., max_width = 80) {
5555
if (!is_present(v)) return("<NA>")
5656
format(v, big.mark = ",", scientific = FALSE)
5757
}
58-
58+
5959
pretty_metric <- function(metric) {
6060
if (!is_present(metric)) return(NULL)
6161
m <- tolower(metric)
@@ -70,7 +70,7 @@ print.pmsims <- function(x, ..., max_width = 80) {
7070
)
7171
paste0(nice, " ", dimc(sprintf("('%s')", metric)))
7272
}
73-
73+
7474
moa <- x$mean_or_assurance %||% "mean"
7575
model <- x$model %||% NA_character_
7676
target <- x$target_performance %||% x$minimum_acceptable_performance %||% NA_real_
@@ -80,7 +80,7 @@ print.pmsims <- function(x, ..., max_width = 80) {
8080
simtime <- x$simulation_time %||% NA
8181
cstatistic <- x$cstatistic
8282
r2 <- x$r2
83-
83+
8484
# --- Inputs table (non-null only)
8585
inputs <- list(
8686
"Outcome" = x$outcome,
@@ -101,7 +101,7 @@ print.pmsims <- function(x, ..., max_width = 80) {
101101
inputs[["Simulation reps"]] <- fmt_int(x$n_reps_total)
102102
keep_inputs <- vapply(inputs, is_present, logical(1))
103103
inputs <- inputs[keep_inputs]
104-
104+
105105
# --- Results table
106106
results <- list(
107107
"Final minimum sample size" = bold(fmt_int(min_n)),
@@ -114,7 +114,7 @@ print.pmsims <- function(x, ..., max_width = 80) {
114114
)
115115
keep_results <- vapply(results, is_present, logical(1))
116116
results <- results[keep_results]
117-
117+
118118
# --- Header box
119119
if (has_cli) {
120120
cli::cat_boxx(
@@ -126,28 +126,28 @@ print.pmsims <- function(x, ..., max_width = 80) {
126126
} else {
127127
cat("\n", bold("pmsims: Sample size simulation summary"), "\n", sep = "")
128128
}
129-
129+
130130
# --- Shared alignment width for BOTH tables
131131
shared_w <- max(nchar(c(names(inputs), names(results))), 25L)
132-
132+
133133
# --- Inputs (aligned two-column, cyan labels)
134134
rule("Inputs")
135135
for (nm in names(inputs)) {
136136
label <- format(nm, width = shared_w, justify = "right")
137137
cat(" ", cyan(label), " : ", inputs[[nm]], "\n", sep = "")
138138
}
139-
139+
140140
# --- Results (aligned two-column, blue labels)
141141
rule("Results")
142142
for (nm in names(results)) {
143143
label <- format(nm, width = shared_w, justify = "right")
144144
cat(" ", blue(label), " : ", results[[nm]], "\n", sep = "")
145145
}
146-
146+
147147
cat(" ", dimc(if (tolower(moa) == "assurance")
148148
italic("Assurance mode ensures the target metric is met with high probability across repeated datasets.")
149149
else
150-
"Mean mode ensures the target metric is met on average across datasets."), "\n", sep = "")
151-
150+
italic("Mean mode ensures the target metric is met on average across datasets.")), "\n", sep = "")
151+
152152
invisible(x)
153-
}
153+
}

pkgdown/about.md

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,6 @@ title: "How pmsims works"
77
**pmsims** estimates the minimum sample size needed for a prediction model to achieve adequate performance with high probability (“assurance”).
88
It does this by simulating datasets, fitting models, evaluating performance, and tracing how performance improves as the sample size grows — a *learning curve*.
99

10-
---
11-
1210
# 1. Conceptual background
1311

1412
Prediction models must be trained on enough data to generalise beyond the development sample.
@@ -18,8 +16,6 @@ Analytic formulae (e.g., *pmsampsize*) rely on assumptions that often fail for m
1816

1917
**Simulation-based approaches**, like pmsims, overcome these limitations by explicitly generating data and empirically assessing model behaviour across different training sizes.
2018

21-
---
22-
2319
# 2. The pmsims workflow
2420

2521
The package operationalises the simulation-based framework in four modular steps:
@@ -56,8 +52,6 @@ The learning curve is used to identify the smallest `n` where the chosen quantil
5652
This gives the required training size for which the model is expected
5753
to achieve adequate performance in at least 80% of cases.
5854

59-
---
60-
6155
# 3. Interpretation
6256

6357
Two complementary criteria can be applied:

tests/testthat.R

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,11 @@
1+
# This file is part of the standard setup for testthat.
2+
# It is recommended that you do not modify it.
3+
#
4+
# Where should you do additional test configuration?
5+
# Learn more about the roles of various files in:
6+
# * https://r-pkgs.org/testing-design.html#sec-tests-files-overview
7+
# * https://testthat.r-lib.org/articles/special-files.html
8+
19
library(testthat)
210
library(pmsims)
311

tests/testthat/test-simulate_wrappers.R

Lines changed: 31 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -3,15 +3,15 @@ test_that("simulate_binary returns a pmsims object", {
33
set.seed(2024)
44

55
result <- simulate_binary(
6-
signal_parameters = 3,
7-
noise_parameters = 2,
6+
signal_parameters = 10,
7+
noise_parameters = 0,
88
predictor_type = "continuous",
99
outcome_prevalence = 0.2,
1010
large_sample_cstatistic = 0.75,
1111
metric = "calibration_slope",
1212
minimum_acceptable_performance = 0.9,
13-
n_reps_total = 60,
14-
mean_or_assurance = "mean"
13+
n_reps_total = 1000,
14+
mean_or_assurance = "assurance"
1515
)
1616

1717
expect_s3_class(result, "pmsims")
@@ -26,38 +26,38 @@ test_that("simulate_continuous returns a pmsims object", {
2626
set.seed(1111)
2727

2828
result <- simulate_continuous(
29-
signal_parameters = 4,
30-
noise_parameters = 2,
29+
signal_parameters = 10,
30+
noise_parameters = 0,
3131
predictor_type = "continuous",
3232
large_sample_rsquared = 0.5,
3333
metric = "calibration_slope",
34-
minimum_acceptable_performance = 1.0,
35-
n_reps_total = 60,
36-
mean_or_assurance = "mean"
34+
minimum_acceptable_performance = 0.9,
35+
n_reps_total = 1000,
36+
mean_or_assurance = "assurance"
3737
)
3838

3939
expect_s3_class(result, "pmsims")
4040
expect_equal(result$outcome, "continuous")
4141
expect_true(is.numeric(result$min_n))
4242
expect_gt(result$min_n, 0)
43-
expect_equal(result$target_performance, 1.0)
43+
expect_equal(result$target_performance, 0.9)
4444
})
4545

4646
test_that("simulate_survival returns a pmsims object", {
4747
skip_on_cran()
4848
set.seed(765)
4949

5050
result <- simulate_survival(
51-
signal_parameters = 4,
52-
noise_parameters = 2,
51+
signal_parameters = 10,
52+
noise_parameters = 0,
5353
predictor_type = "continuous",
5454
large_sample_cindex = 0.75,
5555
baseline_hazard = 0.01,
5656
censoring_rate = 0.3,
5757
metric = "calibration_slope",
5858
minimum_acceptable_performance = 0.9,
59-
n_reps_total = 60,
60-
mean_or_assurance = "mean"
59+
n_reps_total = 1000,
60+
mean_or_assurance = "assurance"
6161
)
6262

6363
expect_s3_class(result, "pmsims")
@@ -70,47 +70,47 @@ test_that("simulate_survival returns a pmsims object", {
7070
test_that("wrapper calibration slope bounds are enforced", {
7171
expect_error(
7272
simulate_binary(
73-
signal_parameters = 3,
74-
noise_parameters = 1,
73+
signal_parameters = 10,
74+
noise_parameters = 0,
7575
predictor_type = "continuous",
7676
outcome_prevalence = 0.2,
7777
large_sample_cstatistic = 0.75,
7878
metric = "calibration_slope",
7979
minimum_acceptable_performance = 0.7,
80-
n_reps_total = 40,
81-
mean_or_assurance = "mean"
80+
n_reps_total = 1000,
81+
mean_or_assurance = "assurance"
8282
),
8383
"Suggested calibration slope is too low",
8484
fixed = TRUE
8585
)
8686

8787
expect_error(
8888
simulate_continuous(
89-
signal_parameters = 3,
90-
noise_parameters = 1,
89+
signal_parameters = 10,
90+
noise_parameters = 0,
9191
predictor_type = "continuous",
9292
large_sample_rsquared = 0.5,
9393
metric = "calibration_slope",
9494
minimum_acceptable_performance = 1.3,
95-
n_reps_total = 40,
96-
mean_or_assurance = "mean"
95+
n_reps_total = 1000,
96+
mean_or_assurance = "assurance"
9797
),
9898
"Suggested calibration slope is too high",
9999
fixed = TRUE
100100
)
101101

102102
expect_error(
103103
simulate_survival(
104-
signal_parameters = 3,
105-
noise_parameters = 1,
104+
signal_parameters = 10,
105+
noise_parameters = 0,
106106
predictor_type = "continuous",
107107
large_sample_cindex = 0.75,
108108
baseline_hazard = 0.01,
109109
censoring_rate = 0.3,
110110
metric = "calibration_slope",
111111
minimum_acceptable_performance = 0.7,
112-
n_reps_total = 40,
113-
mean_or_assurance = "mean"
112+
n_reps_total = 1000,
113+
mean_or_assurance = "assurance"
114114
),
115115
"Suggested calibration slope is too low",
116116
fixed = TRUE
@@ -120,15 +120,15 @@ test_that("wrapper calibration slope bounds are enforced", {
120120
test_that("simulate_binary requires achievable AUC targets", {
121121
expect_error(
122122
simulate_binary(
123-
signal_parameters = 3,
124-
noise_parameters = 1,
123+
signal_parameters = 10,
124+
noise_parameters = 0,
125125
predictor_type = "continuous",
126126
outcome_prevalence = 0.2,
127-
large_sample_cstatistic = 0.82,
127+
large_sample_cstatistic = 0.80,
128128
metric = "auc",
129129
minimum_acceptable_performance = 0.9,
130-
n_reps_total = 40,
131-
mean_or_assurance = "mean"
130+
n_reps_total = 1000,
131+
mean_or_assurance = "assurance"
132132
),
133133
"Requested minimum acceptable AUC exceeds the expected large-sample performance",
134134
fixed = TRUE

0 commit comments

Comments
 (0)