From ab2826dad7a919803a749bda345cc84cce79e732 Mon Sep 17 00:00:00 2001
From: Xyarz
Date: Fri, 26 Jul 2024 11:37:55 +0200
Subject: [PATCH] removed outdated folder

---
 .../Comparison_BayesianMCPMod.qmd      |  506 --------
 .../Comparison_MCPModPack.qmd          | 1113 -----------------
 .../Comparison_convergence.qmd         |  419 -------
 .../Comparison_vignette.qmd            | 1020 ---------------
 vignettes/outdatet/Simulation_Example.Rmd |  163 ---
 vignettes/outdatet/analysis_normal.Rmd |  286 -----
 .../analysis_normal_noninformative.qmd |  414 ------
 7 files changed, 3921 deletions(-)
 delete mode 100644 vignettes/outdatet/Comparison_vignette_outdatet/Comparison_BayesianMCPMod.qmd
 delete mode 100644 vignettes/outdatet/Comparison_vignette_outdatet/Comparison_MCPModPack.qmd
 delete mode 100644 vignettes/outdatet/Comparison_vignette_outdatet/Comparison_convergence.qmd
 delete mode 100644 vignettes/outdatet/Comparison_vignette_outdatet/Comparison_vignette.qmd
 delete mode 100644 vignettes/outdatet/Simulation_Example.Rmd
 delete mode 100644 vignettes/outdatet/analysis_normal.Rmd
 delete mode 100644 vignettes/outdatet/analysis_normal_noninformative.qmd

diff --git a/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_BayesianMCPMod.qmd b/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_BayesianMCPMod.qmd
deleted file mode 100644
index a18a3d3..0000000
--- a/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_BayesianMCPMod.qmd
+++ /dev/null
@@ -1,506 +0,0 @@
----
-title: "Vignette BayesianMCPMod - BayesianMCPMod Analysis"
-subtitle: "WORK IN PROGRESS"
-date: today
-format:
-  html:
-    fig-height: 3.5
-    self-contained: true
-    toc: true
-    number-sections: true
-    #bibliography: references.bib
-vignette: >
-  %\VignetteIndexEntry{Simulation Example of Bayesian MCPMod and MCPMod}
-  %\VignetteEncoding{UTF-8}
-  %\VignetteEngine{quarto::html}
----
-
-The following simulations will be conducted utilizing the 'BayesianMCPMod' package, varying the expected effect for the maximum dose and the sample sizes.
-
-
-# Prior Specification
-In a first step, an uninformative prior is specified.
-
-```{r}
-uninf_prior_list <- list(
-  Ctrl = RBesT::mixnorm(comp1 = c(w = 1, m = 0, n = 1), sigma = sd.sim, param = "mn"),
-  DG_1 = RBesT::mixnorm(comp1 = c(w = 1, m = 0, n = 1), sigma = sd.sim, param = "mn"),
-  DG_2 = RBesT::mixnorm(comp1 = c(w = 1, m = 0, n = 1), sigma = sd.sim, param = "mn"),
-  DG_3 = RBesT::mixnorm(comp1 = c(w = 1, m = 0, n = 1), sigma = sd.sim, param = "mn"),
-  DG_4 = RBesT::mixnorm(comp1 = c(w = 1, m = 0, n = 1), sigma = sd.sim, param = "mn")
-
-)
-```
-
-To calculate success probabilities for the different assumed dose-response models and the specified trial design, we will apply the assessDesign function.
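As a compact, illustrative sketch of that workflow only (not evaluated as part of the simulations below): the setup objects doses.sim, sd.sim, plc.guess, exp.g, emax.g, Nsample, n_sim and alpha are assumed to come from the accompanying setup chunk, and the example_* names are placeholders introduced here. The scenario-specific chunks below repeat exactly this pattern, looping over the assumed maximum effects and sample size allocations.

```{r eval=FALSE}
# Illustrative only: candidate models -> optimal contrasts -> design assessment.
# doses.sim, sd.sim, plc.guess, exp.g, emax.g, Nsample, n_sim and alpha are
# assumed to be defined in the setup chunk; the example_* names are placeholders.
example_models <- Mods(linear      = NULL,
                       exponential = exp.g,
                       emax        = emax.g,
                       doses       = doses.sim,
                       placEff     = plc.guess,
                       maxEff      = 0.2,
                       direction   = "increasing")

# optimal contrasts for an equal allocation across the dose groups
example_contr <- getContr(mods         = example_models,
                          dose_levels  = doses.sim,
                          prior_list   = uninf_prior_list,
                          dose_weights = c(1, 1, 1, 1, 1))

# success probabilities for the assumed design
example_design <- assessDesign(n_patients     = Nsample,
                               mods           = example_models,
                               prior_list     = uninf_prior_list,
                               sd             = sd.sim,
                               n_sim          = n_sim,
                               alpha_crit_val = alpha,
                               contr          = example_contr)
```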
- -## Minimal scnenario -### varying expected effect for maximum dose - - -```{r} - -# list to store results -results_list_Bay_min <- list() - - -list_max_eff <- as.list(c(0.05,0.1,0.2,0.3,0.5)) -chunks <- chunkVector(seq_along(list_max_eff), getDoParWorkers()) - - - -results_list_Bay_min <- foreach(k = chunks, .combine = c) %dorng% { - - lapply(k, function (i) { - - min_models <- Mods(linear=NULL, - exponential = exp.g, - emax = emax.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = list_max_eff[[i]], - direction = "increasing") - #optimal contrasts -contM <- getContr(mods = min_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(1,1,1,1,1)) - - # Simulation step - success_probabilities_min <- assessDesign( - n_patients = Nsample, - mods = min_models, - prior_list = uninf_prior_list, - sd = sd.sim, - n_sim = n_sim, - alpha_crit_val = alpha, - contr = contM) - - }) - -} - -results_min_Bay <- extract_success_rates(results_list_Bay_min, min_scenario) - -minimal_Bay <- print_result_Bay_max_eff(results_min_Bay, c(min_scenario, "average"), expectedEffect) - -minimal_Bay$kable_result - - - -``` -### varying sample size - - -```{r} -# Define the list of sample sizes -nsample_list = list(20*c(2,1,1,1,2),30*c(1,1,1,1,1), 24*c(2,1,1,1,2),36*c(1,1,1,1,1)) - -#optimal contrasts -#allocation c(1,1,1,1,1) -contM <- getContr(mods = min_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(1,1,1,1,1)) - -#allocation c(2,1,1,1,2) -contM_2 <- getContr(mods = min_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(2,1,1,1,2)) -contrasts_list = list(contM_2, contM, contM_2, contM) - -# Initialize list to store results -results_list_nsample_min_Bay <- list() - -#Models with maxEff = 0.2 -min_models <- Mods(linear=NULL, - exponential = exp.g, - emax = emax.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing") - - - -chunks <- chunkVector(seq_along(nsample_list), getDoParWorkers()) - -results_list_nsample_min_Bay <- foreach(k = chunks, .combine = c, .export = c(as.character(min_models), as.character(contrasts_list))) %dorng% { - - lapply(k, function (i) { - - success_probabilities_min <- assessDesign( - n_patients = nsample_list[[i]], - mods = min_models, - prior_list = uninf_prior_list, - sd = sd.sim, - n_sim = n_sim, - alpha_crit_val = alpha, - contr = contrasts_list[[i]]) - - }) -} - - -results_min_Bay_nsample <- extract_success_rates_nsample(results_list_nsample_min_Bay, min_scenario) - -minimal_nsample_Bay <- print_result_Bay_nsample(results_min_Bay_nsample, c(min_scenario, "average"), nsample_vector) -minimal_nsample_Bay$kable_result - - - -``` - - - -## Monotonic scenario - -### varying expected effect for maximum dose - -```{r} -results_list_Bay_monotonic <- list() - - - - -list_max_eff <- as.list(c(0.05,0.1,0.2,0.3,0.5)) -chunks <- chunkVector(seq_along(list_max_eff), getDoParWorkers()) - - - -results_list_Bay_monotonic <- foreach(k = chunks, .combine = c) %dorng% { - - lapply(k, function (i) { - - monotonic_models <- Mods(linear=NULL, - exponential = exp.g, - emax = emax.g, - logistic=logit.g, - sigEmax = sigEmax.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = list_max_eff[[i]], - direction = "increasing") - - # optimal contrasts - contM <- getContr(mods = monotonic_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(1,1,1,1,1)) - - # perform Simulations - success_probabilities_monotonic <- 
assessDesign( - n_patients = Nsample, - mods = monotonic_models, - prior_list = uninf_prior_list, - sd = sd.sim, - n_sim = n_sim, - alpha_crit_val = alpha, - contr = contM) - - - - }) - -} - - -##power results in vector for tables -results_monotonic_Bay <- extract_success_rates(results_list_Bay_monotonic, monotonic_scenario) - - -monotonic_Bay <- print_result_Bay_max_eff(results_monotonic_Bay, c(monotonic_scenario, "average"), expectedEffect) - - -monotonic_Bay$kable_result - - - -``` - -### varying sample size -```{r} -# Initialize list to store results -results_list_nsample_monotonic_Bay <- list() - -# dose - response models -monotonic_models <- Mods(linear=NULL, - exponential = exp.g, - emax = emax.g, - logistic=logit.g, - sigEmax = sigEmax.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing") - -#optimal contrasts -#allocation c(1,1,1,1,1) -contM <- getContr(mods = monotonic_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(1,1,1,1,1)) - -#allocation c(2,1,1,1,2) -contM_2 <- getContr(mods = monotonic_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(2,1,1,1,2)) -contrasts_list = list(contM_2, contM, contM_2, contM) - - -chunks <- chunkVector(seq_along(nsample_list), getDoParWorkers()) - -results_list_nsample_monotonic_Bay <- foreach(k = chunks, .combine = c, .export = c(as.character(monotonic_models), as.character(contrasts_list))) %dorng% { - - lapply(k, function (i) { - - success_probabilities_monotonic <- assessDesign( - n_patients = nsample_list[[i]], - mods = monotonic_models, - prior_list = uninf_prior_list, - sd = sd.sim, - n_sim = n_sim, - alpha_crit_val = alpha, - contr = contrasts_list[[i]]) - }) -} - - -results_monotonic_Bay_nsample <- extract_success_rates_nsample(results_list_nsample_monotonic_Bay, monotonic_scenario) - - -monotonic_nsample_Bay <- print_result_Bay_nsample(results_monotonic_Bay_nsample, c(monotonic_scenario, "average"), nsample_vector) -monotonic_nsample_Bay$kable_result - - -``` - - -## Non-monotonic scenario -### varying expected effect for maximum dose -```{r} -results_list_Bay_non_monotonic <- list() - - -list_max_eff <- as.list(c(0.05,0.1,0.2,0.3,0.5)) -chunks <- chunkVector(seq_along(list_max_eff), getDoParWorkers()) - - - -results_list_Bay_non_monotonic <- foreach(k = chunks, .combine = c) %dorng% { - - lapply(k, function (i) { - - - non_monotonic_models <- Mods(linear=NULL, - emax = emax.g, - sigEmax = sigEmax.g, - quadratic = quad.g, - betaMod = beta.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = list_max_eff[[i]], - direction = "increasing", - addArgs = list(scal=9.6)) - - #optimal contrasts - contM <- getContr(mods = non_monotonic_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(1,1,1,1,1)) - - success_probabilities_non_monotonic <- assessDesign( - n_patients = Nsample, - mods = non_monotonic_models, - prior_list = uninf_prior_list, - sd = sd.sim, - n_sim = n_sim, - alpha_crit_val = alpha, - contr = contM) - - - }) - -} - -results_non_monotonic_Bay <- extract_success_rates(results_list_Bay_non_monotonic, non_monotonic_scenario) - -non_monotonic_Bay <- print_result_Bay_max_eff(results_non_monotonic_Bay, c(non_monotonic_scenario, "average"), expectedEffect) - -non_monotonic_Bay$kable_result - - -``` -### varying sample size -```{r} -# Initialize list to store results -results_list_nsample_non_monotonic_Bay <- list() - -non_monotonic_models <- Mods(linear=NULL, - emax = emax.g, 
- sigEmax = sigEmax.g, - quadratic = quad.g, - betaMod = beta.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing", - addArgs = list(scal=9.6)) - - #optimal contrasts -contM <- getContr(mods = non_monotonic_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(1,1,1,1,1)) - -contM_2 <- getContr(mods = non_monotonic_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(2,1,1,1,2)) - -contrasts_list = list(contM_2, contM, contM_2, contM) - - - -chunks <- chunkVector(seq_along(nsample_list), getDoParWorkers()) - -results_list_nsample_non_monotonic_Bay <- foreach(k = chunks, .combine = c, .export = c(as.character(non_monotonic_models), as.character(contrasts_list))) %dorng% { - - lapply(k, function (i) { - - success_probabilities_non_monotonic <- assessDesign( - n_patients = nsample_list[[i]], - mods = non_monotonic_models, - prior_list = uninf_prior_list, - sd = sd.sim, - n_sim = n_sim, - alpha_crit_val = alpha, - contr = contrasts_list[[i]]) - }) -} - - -results_non_monotonic_Bay_nsample <- extract_success_rates_nsample(results_list_nsample_non_monotonic_Bay, non_monotonic_scenario) - - -non_monotonic_nsample_Bay <- print_result_Bay_nsample(results_non_monotonic_Bay_nsample, c(non_monotonic_scenario, "average"), nsample_vector) -non_monotonic_nsample_Bay$kable_result - - -``` - - -## Variability scenario -### varying expected effect for maximum dose -```{r} - -# prior -var_uninf_prior_list <- list( - Ctrl = RBesT::mixnorm(comp1 = c(w = 1, m = 0, n = 1), sigma = sd.sim_var, param = "mn"), - DG_1 = RBesT::mixnorm(comp1 = c(w = 1, m = 0, n = 1), sigma = sd.sim_var, param = "mn"), - DG_2 = RBesT::mixnorm(comp1 = c(w = 1, m = 0, n = 1), sigma = sd.sim_var, param = "mn"), - DG_3 = RBesT::mixnorm(comp1 = c(w = 1, m = 0, n = 1), sigma = sd.sim_var, param = "mn")) - -results_list_Bay_var <- list() - - - -list_max_eff <- as.list(c(0.05,0.1,0.2,0.3,0.5)) -chunks <- chunkVector(seq_along(list_max_eff), getDoParWorkers()) - - - -results_list_Bay_var <- foreach(k = chunks, .combine = c) %dorng% { - - lapply(k, function (i) { - - var_models <- Mods(linear = NULL, - exponential = exp.g, - emax = emax.g, - doses = doses_var, - placEff = plc.guess, - maxEff = list_max_eff[[i]], - direction = "increasing") - - contM <- getContr(mods = var_models, - dose_levels = doses_var, - prior_list = var_uninf_prior_list, - dose_weights = c(1,1,1,1)) - - success_probabilities_var <- assessDesign( - n_patients = Nsample_var, - mods = var_models, - prior_list = var_uninf_prior_list, - sd = sd.sim_var, - n_sim = n_sim, - alpha_crit_val = alpha, - contr = contM) - - }) - -} - - -results_variability_Bay <- extract_success_rates(results_list_Bay_var, variability_scenario) - -variability_Bay <- print_result_Bay_max_eff(results_variability_Bay, c(variability_scenario, "average"), expectedEffect) - -variability_Bay$kable_result - - -``` -### varying sample size -```{r} -results_list_nsample_Bay_var <- list() - -var_models <- Mods(linear=NULL, - exponential = exp.g, - emax = emax.g, - doses = doses_var, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing") - -#optimal contrasts -contM <- getContr(mods = var_models, - dose_levels = doses_var, - prior_list = var_uninf_prior_list, - dose_weights = c(1,1,1,1)) - -contM_2 <- getContr(mods = var_models, - dose_levels = doses_var, - prior_list = var_uninf_prior_list, - dose_weights = c(2,1,1,2)) -contrasts_list = list(contM_2, contM, contM_2, 
contM) - - -nsample_list_var <- list(20*c(2,1,1,2),30*c(1,1,1,1), 24*c(2,1,1,2),36*c(1,1,1,1)) - -chunks <- chunkVector(seq_along(nsample_list_var), getDoParWorkers()) - -results_list_nsample_Bay_var <- foreach(k = chunks, .combine = c, .export = c(as.character(var_models), as.character(contrasts_list))) %dorng% { - - lapply(k, function (i) { - success_probabilities_var <- assessDesign( - n_patients = nsample_list_var[[i]], - mods = var_models, - prior_list = var_uninf_prior_list, - sd = sd.sim_var, - n_sim = n_sim, - alpha_crit_val = alpha, - contr = contrasts_list[[i]]) - - }) -} - - -results_variability_Bay_nsample <- extract_success_rates_nsample(results_list_nsample_Bay_var, variability_scenario) - - -var_nsample_Bay <- print_result_Bay_nsample(results_variability_Bay_nsample, c(variability_scenario, "average"), as.vector(nsample_list_var)) -var_nsample_Bay$kable_result - - -``` \ No newline at end of file diff --git a/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_MCPModPack.qmd b/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_MCPModPack.qmd deleted file mode 100644 index 9143455..0000000 --- a/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_MCPModPack.qmd +++ /dev/null @@ -1,1113 +0,0 @@ ---- -title: "Vignette BayesianMCPMod - MCPModPack Analysis" -subtitle: "WORK IN PROGRESS" -date: today -format: - html: - fig-height: 3.5 - self-contained: true - toc: true - number-sections: true - #bibliography: references.bib -vignette: > - %\VignetteIndexEntry{Simulation Example of Bayesian MCPMod and MCPMod} - %\VignetteEncoding{UTF-8} - %\VignetteEngine{quarto::html} ---- - -Following simulations will be conducted utilizing the MCPModPack package, with varying the expected effect for maximum dose and the sample sizes. 
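Before the scenario-specific chunks, the sketch below illustrates how a single MCPModPack run is assembled (illustrative only and not evaluated here): min_modelsPack, doses.sim, sd.sim, plc.guess, Nsample, n_sim and alpha are assumed to come from the setup chunk, and the *_example names are placeholders. The func_sim wrapper defined in the next chunk packages the same MCPModSimulation call for repeated use.

```{r eval=FALSE}
# Illustrative only: one MCPModSimulation run for a linear truth with maximum effect 0.2.
# min_modelsPack, doses.sim, sd.sim, plc.guess, Nsample, n_sim and alpha are assumed
# to be defined in the setup chunk; the *_example names are placeholders.
sim_models_example <- list(linear         = NA,
                           max_effect     = 0.2,
                           sd             = rep(sd.sim, length(doses.sim)),
                           placebo_effect = plc.guess)

sim_parameters_example <- list(n            = Nsample,
                               doses        = doses.sim,
                               dropout_rate = 0.0,
                               go_threshold = 0.1,
                               nsims        = n_sim)

sim_result_example <- MCPModSimulation(endpoint_type   = "Normal",
                                       models          = min_modelsPack,
                                       alpha           = alpha,
                                       direction       = "increasing",
                                       model_selection = "aveAIC",
                                       Delta           = 0.1,
                                       sim_models      = sim_models_example,
                                       sim_parameters  = sim_parameters_example)

# power estimate extracted the same way as in the result tables below
sim_result_example$sim_results$power
```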
- -## Minimal scenario - -### varying expected effect for maximum dose -```{r warning=FALSE} -# assumed dose - response model -sim_models_min_linear = list(linear = NA, - exponential = 4.447149, - max_effect = c(0.05,0.1,0.2,0.3,0.5), - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_min_exp = list(exponential = 4.447149, - max_effect = c(0.05,0.1,0.2,0.3,0.5), - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_min_emax = list( emax = 0.6666667, - max_effect = c(0.05,0.1,0.2,0.3,0.5), - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -# Simulation parameters -sim_parameters = list(n = Nsample, - doses = doses.sim, - dropout_rate = 0.0, - go_threshold = 0.1, - nsims = n_sim) - -# Initialize list to store results -results_list_min <- list() - -# Run simulation -func_sim <- function(models ,sim_models, sim_parameters){ - -sim_result = MCPModSimulation(endpoint_type = "Normal", - models = models, - alpha = alpha, - direction = "increasing", - model_selection = "aveAIC", - Delta = 0.1, - sim_models = sim_models, - sim_parameters = sim_parameters) -} - - -list_models <- list(sim_models_min_linear, sim_models_min_exp, sim_models_min_emax) -chunks <- chunkVector(seq_along(list_models), getDoParWorkers()) - -results_list_min <- foreach(k = chunks, .combine = c) %dorng% { - - lapply(k, function (i) { - - func_sim(min_modelsPack, list_models[[i]], sim_parameters) - - }) - -} - - - - -#store results -results_min_MCP <- data.table( - max_eff = c(0.05, 0.1, 0.2, 0.3, 0.5), - linear = c(results_list_min[[1]]$sim_results$power), - exponential = c(results_list_min[[2]]$sim_results$power), - emax = c(results_list_min[[3]]$sim_results$power), - Average = c(sum(results_list_min[[1]]$sim_results$power[1], results_list_min[[2]]$sim_results$power[1], results_list_min[[3]]$sim_results$power[1])/3, - sum(results_list_min[[1]]$sim_results$power[2], results_list_min[[2]]$sim_results$power[2], results_list_min[[3]]$sim_results$power[2])/3, - sum(results_list_min[[1]]$sim_results$power[3], results_list_min[[2]]$sim_results$power[3], results_list_min[[3]]$sim_results$power[3])/3, - sum(results_list_min[[1]]$sim_results$power[4], results_list_min[[2]]$sim_results$power[4], results_list_min[[3]]$sim_results$power[4])/3, - sum(results_list_min[[1]]$sim_results$power[5], results_list_min[[2]]$sim_results$power[5], results_list_min[[3]]$sim_results$power[5])/3)) - - -kable(results_min_MCP)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different expected effects " = 5), font_size = 15, bold = TRUE)%>% - add_header_above(c("Minimal scenario " = 5), font_size = 15, bold = TRUE) - - - -``` - -### varying sample size -```{r warning=FALSE} -#list of different samole sizes -nsample_list = list(20*c(2,1,1,1,2),30*c(1,1,1,1,1), 24*c(2,1,1,1,2),36*c(1,1,1,1,1)) - -# assumed dose - response model -sim_models_min_linear = list(linear = NA, - max_effect = expectedEffect_fix, - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_min_exp = list(exponential = 4.447149, - max_effect = expectedEffect_fix, - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_min_emax = list( emax = 0.6666667, - max_effect = expectedEffect_fix, - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -# Simulation parameters -sim_parameters = list(n = Nsample, - doses = doses.sim, - dropout_rate = 0.0, - go_threshold = 0.1, - nsims = n_sim) - - -# store results 
-power_min_nsample_linear <- vector() -power_min_nsample_exp <- vector() -power_min_nsample_emax <- vector() - - - - - -# # Loop over nsample values -# for (i in seq_along(nsample_list)) { -# -# # Update nsample in simulation parameters -# sim_parameters$n <- nsample_list[[i]] -# -# # Set max_effect to 0.2 -# min_modelsPack$max_effect <- expectedEffect_fix -# -# # power values for different assumed true models -# power_nsample_linear <- func_sim(min_modelsPack, sim_models_min_linear, sim_parameters) -# power_min_nsample_linear[i] <- power_nsample_linear$sim_results$power -# -# power_nsample_exp <- func_sim(min_modelsPack, sim_models_min_exp, sim_parameters) -# power_min_nsample_exp[i] <-power_nsample_exp$sim_results$power -# -# power_nsample_emax <- func_sim(min_modelsPack, sim_models_min_emax, sim_parameters) -# power_min_nsample_emax[i] <- power_nsample_exp$sim_results$power -# -# } -# Set max_effect to 0.2 -min_modelsPack$max_effect <- expectedEffect_fix -#list of different samole sizes -nsample_list = list(20*c(2,1,1,1,2),30*c(1,1,1,1,1), 24*c(2,1,1,1,2),36*c(1,1,1,1,1), - 20*c(2,1,1,1,2),30*c(1,1,1,1,1), 24*c(2,1,1,1,2),36*c(1,1,1,1,1), - 20*c(2,1,1,1,2),30*c(1,1,1,1,1), 24*c(2,1,1,1,2),36*c(1,1,1,1,1)) - -list_models <- list(sim_models_min_linear,sim_models_min_linear,sim_models_min_linear,sim_models_min_linear, - sim_models_min_exp, sim_models_min_exp,sim_models_min_exp,sim_models_min_exp, - sim_models_min_emax, sim_models_min_emax, sim_models_min_emax, sim_models_min_emax) - -#ind_tasks <- 1:12 - -chunks <- chunkVector(seq_along(list_models), getDoParWorkers()) - -results_list_min_nsample <- foreach(k = chunks, .combine = c, .export = c(as.character(min_modelsPack), as.character(nsample_list))) %dorng% { - lapply(k, function (i) { - sim_parameters = list(n = nsample_list[[i]], - doses = doses.sim, - dropout_rate = 0.0, - go_threshold = 0.1, - nsims = n_sim) - - func_sim(min_modelsPack, list_models[[i]], sim_parameters) - - }) - -} - -# store results -results_min_MCP_nsample <- data.table( - N_sample = c("(40,20,20,20,40)","(30,30,30,30,30)", "(48,24,24,24,48)","(36,36,36,36,36)"), - Linear = c(results_list_min_nsample[[1]]$sim_results$power, results_list_min_nsample[[2]]$sim_results$power, results_list_min_nsample[[3]]$sim_results$power, results_list_min_nsample[[4]]$sim_results$power), - Exponential = c(results_list_min_nsample[[5]]$sim_results$power, results_list_min_nsample[[6]]$sim_results$power, results_list_min_nsample[[7]]$sim_results$power, results_list_min_nsample[[8]]$sim_results$power), - Emax = c(results_list_min_nsample[[9]]$sim_results$power, results_list_min_nsample[[10]]$sim_results$power, results_list_min_nsample[[11]]$sim_results$power, results_list_min_nsample[[12]]$sim_results$power), - Average = c(sum(results_list_min_nsample[[1]]$sim_results$power, results_list_min_nsample[[5]]$sim_results$power, results_list_min_nsample[[9]]$sim_results$power)/3, - sum(results_list_min_nsample[[2]]$sim_results$power, results_list_min_nsample[[6]]$sim_results$power, results_list_min_nsample[[10]]$sim_results$power)/3, - sum(results_list_min_nsample[[3]]$sim_results$power, results_list_min_nsample[[7]]$sim_results$power, results_list_min_nsample[[11]]$sim_results$power)/3, - sum(results_list_min_nsample[[4]]$sim_results$power, results_list_min_nsample[[8]]$sim_results$power, results_list_min_nsample[[12]]$sim_results$power)/3) -) - -kable(results_min_MCP_nsample)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different sample sizes " = 5), 
font_size = 15, bold = TRUE)%>% - add_header_above(c("Minimal scenario " = 5), font_size = 15, bold = TRUE) - - - -``` - -## Monotonic scenario - -### varying expected effect for maximum dose -```{r warning=FALSE} -# assumed dose - response models -sim_models_monotonic_linear = list(linear = NA, - max_effect = c(0.05,0.1,0.2,0.3,0.5), - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_monotonic_exp = list( exponential = 4.447149, - max_effect = c(0.05,0.1,0.2,0.3,0.5), - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_monotonic_logistic = list(logistic = c(1.5, 0.2275598), - max_effect = c(0.05,0.1,0.2,0.3,0.5), - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_monotonic_sigemax= list(sigemax = c(1.528629, 4.087463), - max_effect = c(0.05,0.1,0.2,0.3,0.5), - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_monotonic_emax= list(emax = 0.6666667, - max_effect = c(0.05,0.1,0.2,0.3,0.5), - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -# Simulation parameters -sim_parameters = list(n = Nsample, - doses = doses.sim, - dropout_rate = 0.0, - go_threshold = 0.1, - nsims = n_sim) - -# Initialize list to store results -results_list_monotonic <- list() - -list_models <- list(sim_models_monotonic_linear, sim_models_monotonic_exp, sim_models_monotonic_emax, sim_models_monotonic_logistic, sim_models_monotonic_sigemax) -chunks <- chunkVector(seq_along(list_models), getDoParWorkers()) - -results_list_monotonic <- foreach(k = chunks, .combine = c) %dorng% { - - lapply(k, function (i) { - - func_sim(monotonic_modelsPack, list_models[[i]], sim_parameters) - - }) - -} - - -results_monotonic_MCP <- data.table( - max_eff = c(0.05, 0.1, 0.2, 0.3, 0.5), - linear = c(results_list_monotonic[[1]]$sim_results$power), - exp = c(results_list_monotonic[[2]]$sim_results$power), - emax = c(results_list_monotonic[[3]]$sim_results$power), - logistic = c(results_list_monotonic[[4]]$sim_results$power), - sigEmax = c(results_list_monotonic[[5]]$sim_results$power), - Average = c((sum(results_list_monotonic[[1]]$sim_results$power[1], results_list_monotonic[[2]]$sim_results$power[1], results_list_monotonic[[3]]$sim_results$power[1], results_list_monotonic[[4]]$sim_results$power[1], results_list_monotonic[[5]]$sim_results$power[1])/5), - (sum(results_list_monotonic[[1]]$sim_results$power[2], results_list_monotonic[[2]]$sim_results$power[2], results_list_monotonic[[3]]$sim_results$power[2], results_list_monotonic[[4]]$sim_results$power[2], results_list_monotonic[[5]]$sim_results$power[2])/5), - (sum(results_list_monotonic[[1]]$sim_results$power[3], results_list_monotonic[[2]]$sim_results$power[3], results_list_monotonic[[3]]$sim_results$power[3], results_list_monotonic[[4]]$sim_results$power[3], results_list_monotonic[[5]]$sim_results$power[3])/5), -(sum(results_list_monotonic[[1]]$sim_results$power[4], results_list_monotonic[[2]]$sim_results$power[4], results_list_monotonic[[3]]$sim_results$power[4], results_list_monotonic[[4]]$sim_results$power[4], results_list_monotonic[[5]]$sim_results$power[4])/5), -(sum(results_list_monotonic[[1]]$sim_results$power[5], results_list_monotonic[[2]]$sim_results$power[5], results_list_monotonic[[3]]$sim_results$power[5], results_list_monotonic[[4]]$sim_results$power[5], results_list_monotonic[[5]]$sim_results$power[5])/5)) -) - -kable(results_monotonic_MCP)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different 
expected effects " = 7), font_size = 15, bold = TRUE)%>% - add_header_above(c("Monotonic scenario " = 7), font_size = 15, bold = TRUE) - - -``` - -### varying sample size -```{r warning=FALSE} -# assumed dose- response model with assumed maximum effect = 0.2 -sim_models_monotonic_linear$max_effect <- expectedEffect_fix -sim_models_monotonic_exp$max_effect <- expectedEffect_fix -sim_models_monotonic_emax$max_effect <- expectedEffect_fix -sim_models_monotonic_logistic$max_effect <- expectedEffect_fix -sim_models_monotonic_sigemax$max_effect <- expectedEffect_fix - -# store results -power_monotonic_nsample_linear <- vector() -power_monotonic_nsample_exp <- vector() -power_monotonic_nsample_emax <- vector() -power_monotonic_nsample_logistic <- vector() -power_monotonic_nsample_sigemax <- vector() - - - -# Set max_effect to 0.2 -monotonic_modelsPack$max_effect <- expectedEffect_fix -#list of different samole sizes -nsample_list = list(20*c(2,1,1,1,2),30*c(1,1,1,1,1), 24*c(2,1,1,1,2),36*c(1,1,1,1,1), - 20*c(2,1,1,1,2),30*c(1,1,1,1,1), 24*c(2,1,1,1,2),36*c(1,1,1,1,1), - 20*c(2,1,1,1,2),30*c(1,1,1,1,1), 24*c(2,1,1,1,2),36*c(1,1,1,1,1), - 20*c(2,1,1,1,2),30*c(1,1,1,1,1), 24*c(2,1,1,1,2),36*c(1,1,1,1,1), - 20*c(2,1,1,1,2),30*c(1,1,1,1,1), 24*c(2,1,1,1,2),36*c(1,1,1,1,1)) - -list_models <- list(sim_models_monotonic_linear, sim_models_monotonic_linear, sim_models_monotonic_linear, sim_models_monotonic_linear, - sim_models_monotonic_exp, sim_models_monotonic_exp, sim_models_monotonic_exp, sim_models_monotonic_exp, - sim_models_monotonic_emax, sim_models_monotonic_emax, sim_models_monotonic_emax, sim_models_monotonic_emax, - sim_models_monotonic_logistic, sim_models_monotonic_logistic, sim_models_monotonic_logistic, sim_models_monotonic_logistic, - sim_models_monotonic_sigemax, sim_models_monotonic_sigemax, sim_models_monotonic_sigemax, sim_models_monotonic_sigemax) - - -chunks <- chunkVector(seq_along(list_models), getDoParWorkers()) - -results_list_monotonic_nsample <- foreach(k = chunks, .combine = c, .export = c(as.character(monotonic_modelsPack), as.character(nsample_list))) %dorng% { - lapply(k, function (i) { - sim_parameters = list(n = nsample_list[[i]], - doses = doses.sim, - dropout_rate = 0.0, - go_threshold = 0.1, - nsims = n_sim) - - func_sim(monotonic_modelsPack, list_models[[i]], sim_parameters) - - }) - -} - - -# store results -results_monotonic_MCP_nsample <- data.table( - N_sample = c("(40,20,20,20,40)","(30,30,30,30,30)", "(48,24,24,24,48)","(36,36,36,36,36)"), - Linear = c(results_list_monotonic_nsample[[1]]$sim_results$power, results_list_monotonic_nsample[[2]]$sim_results$power, results_list_monotonic_nsample[[3]]$sim_results$power, results_list_monotonic_nsample[[4]]$sim_results$power), - Exponential = c(results_list_monotonic_nsample[[5]]$sim_results$power, results_list_monotonic_nsample[[6]]$sim_results$power, results_list_monotonic_nsample[[7]]$sim_results$power, results_list_monotonic_nsample[[8]]$sim_results$power), - Emax = c(results_list_monotonic_nsample[[9]]$sim_results$power, results_list_monotonic_nsample[[10]]$sim_results$power, results_list_monotonic_nsample[[11]]$sim_results$power, results_list_monotonic_nsample[[12]]$sim_results$power), - Logistic = c(results_list_monotonic_nsample[[13]]$sim_results$power, results_list_monotonic_nsample[[14]]$sim_results$power, results_list_monotonic_nsample[[15]]$sim_results$power, results_list_monotonic_nsample[[16]]$sim_results$power), - sigEmax = c(results_list_monotonic_nsample[[17]]$sim_results$power, 
results_list_monotonic_nsample[[18]]$sim_results$power, results_list_monotonic_nsample[[19]]$sim_results$power, results_list_monotonic_nsample[[20]]$sim_results$power), -Average = c((sum(results_list_monotonic_nsample[[1]]$sim_results$power, results_list_monotonic_nsample[[5]]$sim_results$power, results_list_monotonic_nsample[[9]]$sim_results$power, ... = results_list_monotonic_nsample[[13]]$sim_results$power, results_list_monotonic_nsample[[17]]$sim_results$power)/5), - (sum(results_list_monotonic_nsample[[2]]$sim_results$power, results_list_monotonic_nsample[[6]]$sim_results$power, results_list_monotonic_nsample[[10]]$sim_results$power, results_list_monotonic_nsample[[14]]$sim_results$power, results_list_monotonic_nsample[[18]]$sim_results$power)/5), - (sum(results_list_monotonic_nsample[[3]]$sim_results$power, results_list_monotonic_nsample[[7]]$sim_results$power, results_list_monotonic_nsample[[11]]$sim_results$power, results_list_monotonic_nsample[[15]]$sim_results$power, results_list_monotonic_nsample[[19]]$sim_results$power)/5), - (sum(results_list_monotonic_nsample[[4]]$sim_results$power, results_list_monotonic_nsample[[8]]$sim_results$power, results_list_monotonic_nsample[[12]]$sim_results$power, results_list_monotonic_nsample[[16]]$sim_results$power, results_list_monotonic_nsample[[20]]$sim_results$power)/5))) - - -kable(results_monotonic_MCP_nsample)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different sample sizes" = 7), font_size = 15, bold = TRUE)%>% - add_header_above(c("Monotonic scenario" = 7), font_size = 15, bold = TRUE) - - -``` - -## Non-monotonic scenario - -### varying expected effect for maximum dose - -For the simulations of the non-monotonic scenario, the R package 'Dosefinding' was used instead of 'MCPModPack - -```{r} -# linear with DoseFinding package -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(1,1,1,1,1) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(linear = NULL) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.05,0.1,0.2,0.3,0.5), sd = c(0.4), n_total = c(200)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_linear <- result$linear -``` - - -```{r} -# emax with DoseFinding package -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(1,1,1,1,1) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(emax = 0.6666667) -cand_mod_list <- list(linear = NULL, emax = 
0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.05,0.1,0.2,0.3,0.5), sd = c(0.4), n_total = c(200)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_emax <- result$`emax (ED50=0.6666667)` -``` - - -```{r} -# sigemax with DoseFinding package -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(1,1,1,1,1) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(sigEmax = rbind(c(1.528629, 4.087463))) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.05,0.1,0.2,0.3,0.5), sd = c(0.4), n_total = c(200)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_sigemax <- result$`sigEmax (ED50=1.528629,h=4.087463)` -``` - - - -```{r} -#quadratic with DoseFinding package -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(1,1,1,1,1) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(quadratic = -0.09688711) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.05,0.1,0.2,0.3,0.5), sd = c(0.4), n_total = c(200)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, 
append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_quadratic <- result$`quadratic (delta=-0.09688711)` - -``` - -```{r} -# beta with DoseFinding package -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(1,1,1,1,1) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(betaMod = rbind(c(1.396434, 1.040978))) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.05,0.1,0.2,0.3,0.5), sd = c(0.4), n_total = c(200)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_beta <- result$`betaMod (delta1=1.396434,delta2=1.040978,scal=9.6)` -``` - - -```{r} -#linear, emax, sigemax, quadratic -results_non_monotonic_MCP <- data.table( - Max_eff = c(0.05, 0.1, 0.2, 0.3, 0.5), - linear = power_non_monotonic_linear, - emax = power_non_monotonic_emax, - sigEmax = power_non_monotonic_sigemax, - quadratic = power_non_monotonic_quadratic, - beta = power_non_monotonic_beta, - average = c((sum(power_non_monotonic_linear[1],power_non_monotonic_emax[1], power_non_monotonic_sigemax[1], power_non_monotonic_quadratic[1], power_non_monotonic_beta[1])/5), - (sum(power_non_monotonic_linear[2],power_non_monotonic_emax[2], power_non_monotonic_sigemax[2], power_non_monotonic_quadratic[2], power_non_monotonic_beta[2])/5), - (sum(power_non_monotonic_linear[3],power_non_monotonic_emax[3], power_non_monotonic_sigemax[3], power_non_monotonic_quadratic[3], power_non_monotonic_beta[3])/5), - (sum(power_non_monotonic_linear[4],power_non_monotonic_emax[4], power_non_monotonic_sigemax[4], power_non_monotonic_quadratic[4], power_non_monotonic_beta[4])/5), - (sum(power_non_monotonic_linear[5],power_non_monotonic_emax[5], power_non_monotonic_sigemax[5], power_non_monotonic_quadratic[5], power_non_monotonic_beta[5])/5)) - ) - - -kable(results_non_monotonic_MCP)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different expected effects" = 7), font_size = 15, bold = TRUE)%>% - add_header_above(c("Non-monotonic scenario" = 7), font_size = 15, bold = TRUE) -``` - -### varying sample size - -```{r 
warning=FALSE} - - -# store results -power_non_monotonic_nsample_linear <- vector() -power_non_monotonic_nsample_emax <- vector() -power_non_monotonic_nsample_quadratic <- vector() -power_non_monotonic_nsample_sigemax <- vector() -power_non_monotonic_nsample_beta <- vector() - - -``` - -```{r} -# linear with DoseFinding -# allocation = c(1,1,1,1,1) -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(1,1,1,1,1) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(linear = NULL) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.2), sd = c(0.4), n_total = c(150,180)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_nsample_linear[2] <- result$linear[1] -power_non_monotonic_nsample_linear[4] <- result$linear[2] - -# allocation = c(2,1,1,1,2) -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(2,1,1,1,2) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(linear = NULL) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.2), sd = c(0.4), n_total = c(140,168)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_nsample_linear[1] <- result$linear[1] -power_non_monotonic_nsample_linear[3] <- result$linear[2] -``` - - -```{r} - -# emax with DoseFinding -#allocation = c(1,1,1,1,1) -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(1,1,1,1,1) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- 
list(emax = 0.6666667) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.2), sd = c(0.4), n_total = c(150,180)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_nsample_emax[2] <- result$`emax (ED50=0.6666667)`[1] -power_non_monotonic_nsample_emax[4] <- result$`emax (ED50=0.6666667)`[2] - -# allocation = c(2,1,1,1,2) -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(2,1,1,1,2) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(emax = 0.6666667) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.2), sd = c(0.4), n_total = c(140,168)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_nsample_emax[1] <- result$`emax (ED50=0.6666667)`[1] -power_non_monotonic_nsample_emax[3] <- result$`emax (ED50=0.6666667)`[2] -``` - - - -```{r} -# sigemax with DoseFinding -#allocation = c(1,1,1,1,1) -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(1,1,1,1,1) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(sigEmax = rbind(c(1.528629, 4.087463))) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.2), sd = c(0.4), n_total = c(150,180)) -power_list <- vector("list", nrow(grd)) 
-dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_nsample_sigemax[2] <- result$`sigEmax (ED50=1.528629,h=4.087463)`[1] -power_non_monotonic_nsample_sigemax[4] <- result$`sigEmax (ED50=1.528629,h=4.087463)`[2] - -# allocation = c(2,1,1,1,2) -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(2,1,1,1,2) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(sigEmax = rbind(c(1.528629, 4.087463))) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.2), sd = c(0.4), n_total = c(140,168)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_nsample_sigemax[1] <- result$`sigEmax (ED50=1.528629,h=4.087463)`[1] -power_non_monotonic_nsample_sigemax[3] <- result$`sigEmax (ED50=1.528629,h=4.087463)`[2] -``` - - - - -```{r} -# quadratic with DoseFinding -#allocation = c(1,1,1,1,1) -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(1,1,1,1,1) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(quadratic = -0.09688711) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.2), sd = c(0.4), n_total = c(150,180)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = 
mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_nsample_quadratic[2] <- result$`quadratic (delta=-0.09688711)`[1] -power_non_monotonic_nsample_quadratic[4] <- result$`quadratic (delta=-0.09688711)`[2] - -# allocation = c(2,1,1,1,2) -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(2,1,1,1,2) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(quadratic = -0.09688711) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.2), sd = c(0.4), n_total = c(140,168)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_nsample_quadratic[1] <- result$`quadratic (delta=-0.09688711)`[1] -power_non_monotonic_nsample_quadratic[3] <- result$`quadratic (delta=-0.09688711)`[2] -``` - - - -```{r} -# beta with DoseFinding -#allocation = c(1,1,1,1,1) -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(1,1,1,1,1) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(betaMod = rbind(c(1.396434, 1.040978))) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.2), sd = c(0.4), n_total = c(150,180)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_nsample_beta[2] <- result$`betaMod (delta1=1.396434,delta2=1.040978,scal=9.6)`[1] 
-power_non_monotonic_nsample_beta[4] <- result$`betaMod (delta1=1.396434,delta2=1.040978,scal=9.6)`[2] - -# allocation = c(2,1,1,1,2) -mvt_control <- mvtnorm.control(maxpts = 30000, abseps = 0.001, releps = 0) -doses <- c(0,1,2,4,8) -allocation <- c(2,1,1,1,2) -alpha <- 0.05 -addArgs <- list(off = 0.08, scal = 9.6) - -resp_mod_list <- list(betaMod = rbind(c(1.396434, 1.040978))) -cand_mod_list <- list(linear = NULL, emax = 0.6666667, quadratic = -0.09688711, betaMod = rbind(c(1.396434, 1.040978)), sigEmax = rbind(c(1.528629, 4.087463))) -mods <- do.call(Mods, append(cand_mod_list, - list(maxEff = 1, doses = doses, placEff = 0, addArgs = addArgs))) -cont_mat <- optContr(mods, w = allocation) - -grd <- expand.grid(max_eff = c(0.2), sd = c(0.4), n_total = c(140,168)) -power_list <- vector("list", nrow(grd)) -dat_n <- t(sapply(grd$n_total, function(n) round(n*allocation/sum(allocation)))) -colnames(dat_n) <- paste0("D", doses) -for (i in 1:nrow(grd)) { - ###### - mods <- do.call(Mods, append(resp_mod_list, list(maxEff = grd$max_eff[i], doses = doses, - placEff = 0, addArgs = addArgs))) - n <- dat_n[i, ] - power_list[[i]] <- powMCT(cont_mat, alpha, mods, n, grd$sd[i], control = mvt_control) -} -power_df <- as.data.frame(do.call("rbind", power_list)) -mu <- getResp(mods) -parList <- attr(mu, 'parList') -mod_nams <- DoseFinding:::getModNams(parList) -colnames(power_df) <- mod_nams -power_df[['mean power']] <- apply(power_df, 1, mean) -result <- cbind(grd, power_df, dat_n) -power_non_monotonic_nsample_beta[1] <- result$`betaMod (delta1=1.396434,delta2=1.040978,scal=9.6)`[1] -power_non_monotonic_nsample_beta[3] <- result$`betaMod (delta1=1.396434,delta2=1.040978,scal=9.6)`[2] -``` - - -```{r} -#store results -results_non_monotonic_MCP_nsample <- data.table( - N_sample = c("(40,20,20,20,40)","(30,30,30,30,30)", "(48,24,24,24,48)","(36,36,36,36,36)"), - Linear = power_non_monotonic_nsample_linear, - Emax = power_non_monotonic_nsample_emax, - sigEmax = power_non_monotonic_nsample_sigemax, - quadratic = power_non_monotonic_nsample_quadratic, - beta = power_non_monotonic_nsample_beta -) - - -kable(results_non_monotonic_MCP_nsample)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different sample sizes" = 6), font_size = 15, bold = TRUE)%>% - add_header_above(c("Non-monotonic scenario" = 6), font_size = 15, bold = TRUE) - -``` - -## Variability scenario -### varying expected effect for maximum dose - -```{r warning=FALSE} -# assumed dose - response model -sim_models_var_linear = list(linear = NA, - max_effect = c(0.05,0.1,0.2,0.3,0.5), - sd = rep(sd.sim_var, length(doses_var)), - placebo_effect = plc.guess) - -sim_models_var_exp = list( exponential = 4.447149, - max_effect = c(0.05,0.1,0.2,0.3,0.5), - sd = rep(sd.sim_var, length(doses_var)), - placebo_effect = plc.guess) - -sim_models_var_emax = list(emax = 0.6666667, - max_effect = c(0.05,0.1,0.2,0.3,0.5), - sd = rep(sd.sim_var, length(doses_var)), - placebo_effect = plc.guess) - - - -# Simulation parameters -sim_parameters_var = list(n = Nsample_var, - doses = doses_var, - dropout_rate = 0.0, - go_threshold = 0.1, - nsims = n_sim) - -# perform Simulations - -list_models <- list(sim_models_var_linear, sim_models_var_exp, sim_models_var_emax) -chunks <- chunkVector(seq_along(list_models), getDoParWorkers()) - -results_list_var <- foreach(k = chunks, .combine = c) %dorng% { - - lapply(k, function (i) { - - func_sim(var_modelsPack, list_models[[i]], sim_parameters_var) - - }) - -} - - -# store results -results_var_MCP <- 
data.table( - Max_eff = c(0.05, 0.1, 0.2, 0.3, 0.5), - linear = results_list_var[[1]]$sim_results$power, - exponential = results_list_var[[2]]$sim_results$power, - emax = results_list_var[[3]]$sim_results$power, - average = c((sum(results_list_var[[1]]$sim_results$power[1], results_list_var[[2]]$sim_results$power[1],results_list_var[[3]]$sim_results$power[1])/3), - (sum(results_list_var[[1]]$sim_results$power[2], results_list_var[[2]]$sim_results$power[2],results_list_var[[3]]$sim_results$power[2])/3), - (sum(results_list_var[[1]]$sim_results$power[3], results_list_var[[2]]$sim_results$power[3],results_list_var[[3]]$sim_results$power[3])/3), - (sum(results_list_var[[1]]$sim_results$power[4], results_list_var[[2]]$sim_results$power[4],results_list_var[[3]]$sim_results$power[4])/3), - (sum(results_list_var[[1]]$sim_results$power[5], results_list_var[[2]]$sim_results$power[5],results_list_var[[3]]$sim_results$power[5])/3)) -) - -kable(results_var_MCP)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different expected effects" = 5), font_size = 15, bold = TRUE)%>% - add_header_above(c("Variability scenario" = 5), font_size = 15, bold = TRUE) - - -``` - -### varying sample size -```{r warning=FALSE} -# assume maximum effect = 0.2 -sim_models_var_linear$max_effect <- expectedEffect_fix -sim_models_var_exp$max_effect <- expectedEffect_fix -sim_models_var_emax$max_effect <- expectedEffect_fix - - -# store results -power_var_nsample_linear <- vector() -power_var_nsample_exp <- vector() -power_var_nsample_emax <- vector() - -# # Loop over nsample values -# for (i in seq_along(nsample_list_var)) { -# -# # Update nsample in simulation parameters -# sim_parameters_var$n <- nsample_list_var[[i]] -# -# -# # power values for different assumed true models -# power_nsample_linear <- func_sim(var_modelsPack, sim_models_var_linear, sim_parameters_var) -# power_var_nsample_linear[i] <- power_nsample_linear$sim_results$power -# -# power_nsample_exp <- func_sim(var_modelsPack, sim_models_var_exp, sim_parameters_var) -# power_var_nsample_exp[i] <- power_nsample_exp$sim_results$power -# -# power_nsample_emax <- func_sim(var_modelsPack, sim_models_var_emax, sim_parameters_var) -# power_var_nsample_emax[i] <-power_nsample_emax$sim_results$power -# -# -# } - -var_modelsPack$max_effect <- expectedEffect_fix -#list of different samole sizes -nsample_list_var = list(20*c(2,1,1,2),30*c(1,1,1,1), 24*c(2,1,1,2),36*c(1,1,1,1), - 20*c(2,1,1,2),30*c(1,1,1,1), 24*c(2,1,1,2),36*c(1,1,1,1), - 20*c(2,1,1,2),30*c(1,1,1,1), 24*c(2,1,1,2),36*c(1,1,1,1)) - -list_models <- list(sim_models_var_linear, sim_models_var_linear, sim_models_var_linear, sim_models_var_linear, - sim_models_var_exp, sim_models_var_exp, sim_models_var_exp, sim_models_var_exp, - sim_models_var_emax, sim_models_var_emax, sim_models_var_emax, sim_models_var_emax) - - -chunks <- chunkVector(seq_along(list_models), getDoParWorkers()) - -results_list_var_nsample <- foreach(k = chunks, .combine = c, .export = c(as.character(var_modelsPack), as.character(nsample_list))) %dorng% { - lapply(k, function (i) { - sim_parameters = list(n = nsample_list_var[[i]], - doses = doses_var, - dropout_rate = 0.0, - go_threshold = 0.1, - nsims = n_sim) - - - func_sim(var_modelsPack, list_models[[i]], sim_parameters) - - }) - -} - - -results_var_MCP_nsample <- data.table( - N_sample = c("(40,20,20,20,40)","(30,30,30,30,30)", "(48,24,24,24,48)","(36,36,36,36,36)"), - Linear = c(results_list_var_nsample[[1]]$sim_results$power, 
results_list_var_nsample[[2]]$sim_results$power, results_list_var_nsample[[3]]$sim_results$power, results_list_var_nsample[[4]]$sim_results$power), - Exponential =c(results_list_var_nsample[[5]]$sim_results$power, results_list_var_nsample[[6]]$sim_results$power, results_list_var_nsample[[7]]$sim_results$power, results_list_var_nsample[[8]]$sim_results$power), - Emax = c(results_list_var_nsample[[9]]$sim_results$power, results_list_var_nsample[[10]]$sim_results$power, results_list_var_nsample[[11]]$sim_results$power, results_list_var_nsample[[12]]$sim_results$power), - Average = c((sum(results_list_var_nsample[[1]]$sim_results$power, results_list_var_nsample[[5]]$sim_results$power, results_list_var_nsample[[9]]$sim_results$power)/3), - (sum(results_list_var_nsample[[2]]$sim_results$power, results_list_var_nsample[[6]]$sim_results$power, results_list_var_nsample[[10]]$sim_results$power)/3), - (sum(results_list_var_nsample[[3]]$sim_results$power, results_list_var_nsample[[7]]$sim_results$power, results_list_var_nsample[[11]]$sim_results$power)/3), - (sum(results_list_var_nsample[[4]]$sim_results$power, results_list_var_nsample[[8]]$sim_results$power, results_list_var_nsample[[12]]$sim_results$power)/3)) -) - -kable(results_var_MCP_nsample)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different sample sizes" = 5), font_size = 15, bold = TRUE)%>% - add_header_above(c("Variability scenario" = 5), font_size = 15, bold = TRUE) - -``` \ No newline at end of file diff --git a/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_convergence.qmd b/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_convergence.qmd deleted file mode 100644 index 2b6e163..0000000 --- a/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_convergence.qmd +++ /dev/null @@ -1,419 +0,0 @@ ---- -title: "Vignette BayesianMCPMod - convergence of power values" -subtitle: "WORK IN PROGRESS" -date: today -format: - html: - fig-height: 3.5 - self-contained: true - toc: true - number-sections: true - #bibliography: references.bib -vignette: > - %\VignetteIndexEntry{Simulation Example of Bayesian MCPMod and MCPMod} - %\VignetteEncoding{UTF-8} - %\VignetteEngine{quarto::html} ---- - - -```{r} -# minimal BayesianMCPMod - -# Define a vector of different n_sim values -n_sim_values_list <- as.list(c(100, 500, 1000, 2500, 5000)) -n_sim_values <- c(100, 500, 1000, 2500, 5000) - -# Initialize a list to store the results -results_list_nsim_Bay <- list() - -min_models <- Mods(linear=NULL, - exponential = exp.g, - emax = emax.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing") -#optimal contrasts -contM <- getContr(mods = min_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(1,1,1,1,1)) - -chunks <- chunkVector(seq_along(n_sim_values_list), getDoParWorkers()) - - -results_list_nsim_Bay <- foreach(k = chunks, .combine = c, .export = c(as.character(min_models), as.character(contM))) %dorng% { - - lapply(k, function (i) { - - - # Simulation step - success_probabilities_min <- assessDesign( - n_patients = Nsample, - mods = min_models, - prior_list = uninf_prior_list, - sd = sd.sim, - n_sim = n_sim_values_list[[i]], - alpha_crit_val = alpha, - contr = contM) - - }) - -} - -results_nsim_Bay <- extract_success_rates_nsim(results_list_nsim_Bay, min_scenario, n_sim_values) - - -``` - -```{r warning=FALSE} -# minimal MCPModPack - -# assumed dose - response model -sim_models_min_linear = list(linear = NA, - 
max_effect = expectedEffect_fix, - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_min_exp = list(exponential = 4.447149, - max_effect = expectedEffect_fix, - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_min_emax = list( emax = 0.6666667, - max_effect = expectedEffect_fix, - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - - - - -list_models <- list(sim_models_min_linear, sim_models_min_linear, sim_models_min_linear, sim_models_min_linear, sim_models_min_linear, - sim_models_min_exp, sim_models_min_exp, sim_models_min_exp, sim_models_min_exp, sim_models_min_exp, - sim_models_min_emax, sim_models_min_emax, sim_models_min_emax, sim_models_min_emax, sim_models_min_emax) -n_sim_values_list <- as.list(c(100, 500, 1000, 2500, 5000, 100, 500, 1000, 2500, 5000, 100, 500, 1000, 2500, 5000)) - -chunks <- chunkVector(seq_along(list_models), getDoParWorkers()) - -results_list_nsim_MCP <- foreach(k = chunks, .combine = c, .export = c(as.character(min_modelsPack))) %dorng% { - - lapply(k, function (i) { - # Simulation parameters - sim_parameters = list(n = Nsample, - doses = doses.sim, - dropout_rate = 0.0, - go_threshold = 0.1, - nsims = n_sim_values_list[[i]]) - - func_sim(min_modelsPack, list_models[[i]], sim_parameters) - - }) - -} - - - -``` - - - - -```{r} -# monotonic BayesianMCPMod - -# Initialize a list to store the results - results_list_nsim_Bay_monotonic <- list() - -monotonic_models <- Mods(linear=NULL, - exponential = exp.g, - emax = emax.g, - logistic=logit.g, - sigEmax = sigEmax.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing") - -#optimal contrasts -contM <- getContr(mods = monotonic_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(1,1,1,1,1)) - -n_sim_values_list <- as.list(c(100, 500, 1000, 2500, 5000)) - -chunks <- chunkVector(seq_along(n_sim_values_list), getDoParWorkers()) - - -results_list_nsim_Bay_monotonic <- foreach(k = chunks, .combine = c, .export = c(as.character(monotonic_models), as.character(contM))) %dorng% { - - lapply(k, function (i) { - - - # Simulation step - success_probabilities_min <- assessDesign( - n_patients = Nsample, - mods = monotonic_models, - prior_list = uninf_prior_list, - sd = sd.sim, - n_sim = n_sim_values_list[[i]], - alpha_crit_val = alpha, - contr = contM) - - }) - -} - - - -results_nsim_Bay_monotonic <- extract_success_rates_nsim(results_list_nsim_Bay_monotonic, monotonic_scenario, n_sim_values) - -``` - -```{r warning=FALSE} -# monotonic MCPModPack - -sim_models_monotonic_linear$max_effect <- expectedEffect_fix -sim_models_monotonic_exp$max_effect <- expectedEffect_fix -sim_models_monotonic_emax$max_effect <- expectedEffect_fix -sim_models_monotonic_logistic$max_effect <- expectedEffect_fix -sim_models_monotonic_sigemax$max_effect <- expectedEffect_fix - -list_models <- list(sim_models_monotonic_linear, sim_models_monotonic_linear, sim_models_monotonic_linear, sim_models_monotonic_linear, sim_models_monotonic_linear, - sim_models_monotonic_exp, sim_models_monotonic_exp, sim_models_monotonic_exp, sim_models_monotonic_exp, sim_models_monotonic_exp, - sim_models_monotonic_emax, sim_models_monotonic_emax, sim_models_monotonic_emax, sim_models_monotonic_emax, sim_models_monotonic_emax, - sim_models_monotonic_logistic, sim_models_monotonic_logistic, sim_models_monotonic_logistic, sim_models_monotonic_logistic, sim_models_monotonic_logistic, - sim_models_monotonic_sigemax, 
sim_models_monotonic_sigemax, sim_models_monotonic_sigemax, sim_models_monotonic_sigemax,sim_models_monotonic_sigemax - ) -n_sim_values_list <- as.list(c(100, 500, 1000, 2500, 5000, - 100, 500, 1000, 2500, 5000, - 100, 500, 1000, 2500, 5000, - 100, 500, 1000, 2500, 5000, - 100, 500, 1000, 2500, 5000)) - -chunks <- chunkVector(seq_along(list_models), getDoParWorkers()) - -results_list_nsim_MCP_monotonic <- foreach(k = chunks, .combine = c, .export = c(as.character(monotonic_modelsPack))) %dorng% { - - lapply(k, function (i) { - # Simulation parameters - sim_parameters = list(n = Nsample, - doses = doses.sim, - dropout_rate = 0.0, - go_threshold = 0.1, - nsims = n_sim_values_list[[i]]) - - func_sim(monotonic_modelsPack, list_models[[i]], sim_parameters) - - }) - -} - - -``` - -```{r} -# non-monotonic BayesianMCPMod - -# Initialize list to store results -results_list_nsim_Bay_non_monotonic <- list() - -non_monotonic_models <- Mods(linear=NULL, - emax = emax.g, - sigEmax = sigEmax.g, - quadratic = quad.g, - betaMod = beta.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing", - addArgs = list(scal=9.6)) - - #optimal contrasts -contM <- getContr(mods = non_monotonic_models, - dose_levels = doses.sim, - prior_list = uninf_prior_list, - dose_weights = c(1,1,1,1,1)) - -n_sim_values_list <- as.list(c(100, 500, 1000, 2500, 5000)) - -chunks <- chunkVector(seq_along(n_sim_values_list), getDoParWorkers()) - - -results_list_nsim_Bay_non_monotonic <- foreach(k = chunks, .combine = c, .export = c(as.character(non_monotonic_models), as.character(contM))) %dorng% { - - lapply(k, function (i) { - - - # Simulation step - success_probabilities_min <- assessDesign( - n_patients = Nsample, - mods = non_monotonic_models, - prior_list = uninf_prior_list, - sd = sd.sim, - n_sim = n_sim_values_list[[i]], - alpha_crit_val = alpha, - contr = contM) - - }) - -} - - - -results_nsim_Bay_non_monotonic <- extract_success_rates_nsim(results_list_nsim_Bay_non_monotonic, non_monotonic_scenario, n_sim_values) -``` - -```{r, warning=FALSE} -# non-monotonic MCPModPack - -results_list_nsim_MCP_non_monotonic = list( - power_non_monotonic_nsim_linear = vector(), - power_non_monotonic_nsim_emax = vector(), - power_non_monotonic_nsim_sigemax= vector() - #power_non_monotonic_nsim_quadratic = vector(), - #power_non_monotonic_nsim_beta = vector() - ) - -sim_models_non_monotonic_linear = list(linear = NA, - max_effect = 0.2, - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_non_monotonic_emax= list(emax = 0.6666667, - max_effect = 0.2, - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - -sim_models_non_monotonic_sigemax= list(sigemax = c(1.528629, 4.087463), - max_effect = 0.2, - sd = rep(sd.sim, length(doses.sim)), - placebo_effect = plc.guess) - - -sim_models_non_monotonic_linear$max_effect <- expectedEffect_fix -sim_models_non_monotonic_emax$max_effect <- expectedEffect_fix -sim_models_non_monotonic_sigemax$max_effect <- expectedEffect_fix -#sim_models_non_monotonic_quadratic$max_effect <- expectedEffect_fix -#sim_models_non_monotonic_beta$max_effect <- expectedEffect_fix - -list_models <- list(sim_models_non_monotonic_linear, sim_models_non_monotonic_linear, sim_models_non_monotonic_linear, sim_models_non_monotonic_linear, sim_models_non_monotonic_linear, - sim_models_non_monotonic_emax, sim_models_non_monotonic_emax, sim_models_non_monotonic_emax, sim_models_non_monotonic_emax, sim_models_non_monotonic_emax, - 
sim_models_non_monotonic_sigemax, sim_models_non_monotonic_sigemax, sim_models_non_monotonic_sigemax, sim_models_non_monotonic_sigemax,sim_models_non_monotonic_sigemax - ) -n_sim_values_list <- as.list(c(100, 500, 1000, 2500, 5000, - 100, 500, 1000, 2500, 5000, - 100, 500, 1000, 2500, 5000)) - -chunks <- chunkVector(seq_along(list_models), getDoParWorkers()) - -results_list_nsim_MCP_non_monotonic <- foreach(k = chunks, .combine = c, .export = c(as.character(non_monotonic_modelsPack))) %dorng% { - - lapply(k, function (i) { - # Simulation parameters - sim_parameters = list(n = Nsample, - doses = doses.sim, - dropout_rate = 0.0, - go_threshold = 0.1, - nsims = n_sim_values_list[[i]]) - - func_sim(non_monotonic_modelsPack, list_models[[i]], sim_parameters) - - }) - -} - - -``` - -```{r} -# variability BayesianMCPMod - -# Initialize list to store results -results_list_nsim_Bay_var <- list() - -var_models <- Mods(linear=NULL, - exponential = exp.g, - emax = emax.g, - doses = doses_var, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing") - -contM <- getContr(mods = var_models, - dose_levels = doses_var, - prior_list = var_uninf_prior_list, - dose_weights = c(1,1,1,1)) - -n_sim_values_list <- as.list(c(100, 500, 1000, 2500, 5000)) - -chunks <- chunkVector(seq_along(n_sim_values_list), getDoParWorkers()) - - -results_list_nsim_Bay_var <- foreach(k = chunks, .combine = c, .export = c(as.character(var_models), as.character(contM))) %dorng% { - - lapply(k, function (i) { - - - # Simulation step - success_probabilities_min <- assessDesign( - n_patients = Nsample_var, - mods = var_models, - prior_list = var_uninf_prior_list, - sd = sd.sim_var, - n_sim = n_sim_values_list[[i]], - alpha_crit_val = alpha, - contr = contM) - - }) - -} - - - -results_nsim_Bay_var <- extract_success_rates_nsim(results_list_nsim_Bay_var, variability_scenario, n_sim_values) -``` - -```{r, warning=FALSE} -# variability MCPModPack - -results_list_nsim_MCP_var = list( - power_var_nsim_linear = vector(), - power_var_nsim_exp = vector(), - power_var_nsim_emax= vector()) - -# assume maximum effect = 0.2 -sim_models_var_linear$max_effect <- expectedEffect_fix -sim_models_var_exp$max_effect <- expectedEffect_fix -sim_models_var_emax$max_effect <- expectedEffect_fix - -sim_parameters_var$n <- Nsample_var - - -list_models <- list(sim_models_var_linear, sim_models_var_linear, sim_models_var_linear, sim_models_var_linear, sim_models_var_linear, - sim_models_var_exp, sim_models_var_exp, sim_models_var_exp, sim_models_var_exp, sim_models_var_exp, - sim_models_var_emax, sim_models_var_emax, sim_models_var_emax, sim_models_var_emax, sim_models_var_emax) -n_sim_values_list <- as.list(c(100, 500, 1000, 2500, 5000, - 100, 500, 1000, 2500, 5000, - 100, 500, 1000, 2500, 5000)) - -chunks <- chunkVector(seq_along(list_models), getDoParWorkers()) - -results_list_nsim_MCP_var <- foreach(k = chunks, .combine = c, .export = c(as.character(var_modelsPack))) %dorng% { - - lapply(k, function (i) { - # Simulation parameters - sim_parameters = list(n = Nsample_var, - doses = doses_var, - dropout_rate = 0.0, - go_threshold = 0.1, - nsims = n_sim_values_list[[i]]) - - func_sim(var_modelsPack, list_models[[i]], sim_parameters) - - }) - -} - - -``` \ No newline at end of file diff --git a/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_vignette.qmd b/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_vignette.qmd deleted file mode 100644 index 89b9525..0000000 --- 
a/vignettes/outdatet/Comparison_vignette_outdatet/Comparison_vignette.qmd +++ /dev/null @@ -1,1020 +0,0 @@ ---- -title: "Vignette BayesianMCPMod" -subtitle: "WORK IN PROGRESS" -date: today -format: - html: - fig-height: 3.5 - self-contained: true - toc: true - number-sections: true - #bibliography: references.bib -vignette: > - %\VignetteIndexEntry{Simulation Example of Bayesian MCPMod and MCPMod} - %\VignetteEncoding{UTF-8} - %\VignetteEngine{quarto::html} -editor: - markdown: - wrap: 72 ---- - -```{r} -#| code-summary: setup -#| code-fold: true -#| message: false -#| warning: false - -#' Display Parameters Table -#' -#' This function generates a markdown table displaying the names and values of parameters -#' from a named list. -#' -#' @param named_list A named list where each name represents a parameter name and the list -#' element represents the parameter value. Date values in the list are automatically -#' converted to character strings for display purposes. -#' -#' @return Prints a markdown table with two columns: "Parameter Name" and "Parameter Values". -#' The function does not return a value but displays the table directly to the output. -#' -#' @importFrom knitr kable -#' @examples -#' params <- list("Start Date" = as.Date("2020-01-01"), -#' "End Date" = as.Date("2020-12-31"), -#' "Threshold" = 10) -#' display_params_table(params) -#' -#' @export -display_params_table <- function(named_list) { - display_table <- data.frame() - value_names <- data.frame() - for (i in 1:length(named_list)) { - # dates will display as numeric by default, so convert to char first - if (class(named_list[[i]]) == "Date") { - named_list[[i]] = as.character(named_list[[i]]) - } - if (!is.null(names(named_list[[i]]))) { - value_names <- rbind(value_names, paste(names(named_list[[i]]), collapse = ', ')) - } - values <- data.frame(I(list(named_list[[i]]))) - display_table <- rbind(display_table, values) - } - - round_numeric <- function(x, digits = 3) { - if (is.numeric(x)) { - return(round(x, digits)) - } else { - return(x) - } - } - - display_table[1] <- lapply(display_table[1], function(sublist) { - lapply(sublist, round_numeric) - }) - - class(display_table[[1]]) <- "list" - - if (nrow(value_names) == 0) { - knitr::kable( - cbind(names(named_list), display_table), - col.names = c("Name", "Value") - ) - } else { - knitr::kable( - cbind(names(named_list), value_names, display_table), - col.names = c("Name", "Value Labels", "Value") - ) - } -} -# function to solve power results for tables (different max eff) BayesianMCPMod -# return(successrates models, average) -extract_success_rates <- function(results_list, models) { - success_rates <- list() - - for (i in seq_along(results_list)) { - success_rate <- c() - for (model in models) { - success_rate <- c(success_rate, attr(results_list[[i]][[model]]$BayesianMCP,"successRate")) - } - success_rates[[paste0("Bay_", attr(results_list[[i]],"maxEff"))]] <- c(success_rate, attr(results_list[[i]], "avgSuccessRate")) - } - - return(success_rates) - } - -# function to solve power results for tables (different nsample) BayesianMCPMod -#models % - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different expected effects " = length(scenario)+1), font_size = 15, bold = TRUE)%>% - add_header_above(c("BayesianMCPMod " = length(scenario)+1), font_size = 15, bold = TRUE) - - list(result_table = result_table, kable_result = kable_result) -} - - - -print_result_Bay_nsample <- function(results, scenario, variable) { - - result_table <- t(data.table( - 
Bay_1 = results$Bay_1, - Bay_2 = results$Bay_2, - Bay_3 = results$Bay_3, - Bay_4 = results$Bay_4)) - - result_table <- as.data.table(result_table) - names(result_table) <- scenario - #return(result_table) - - kable_result <- kable(cbind(variable, result_table))%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Success probability results different sample sizes " = length(scenario)+1), font_size = 15, bold = TRUE)%>% - add_header_above(c("BayesianMCPMod " = length(scenario)+1), font_size = 15, bold = TRUE) - - list(result_table = result_table, kable_result = kable_result) -} - -plot_power_deviation <- function(data, x, xlab, xlim){ - plot <- ggplot2::ggplot(data, aes(x = x, y = value, color = variable, group = variable))+ - geom_point()+ - geom_line() + - scale_x_continuous(breaks = xlim) + - geom_hline(aes(yintercept = 0), linetype = 2)+ - scale_color_manual(name = "assumed true model", values = c("linear" = "red", "exponential" = "blue", "emax" = "darkgreen", "logistic" = "orange", "sigemax" = "purple", "beta" = "deepskyblue", "quadratic" = "deeppink"))+ - labs(x = xlab, y = "power deviation")+ - ylim(-0.2, 0.2)+ #with higher n_sim!!!!!!!!!!!! - theme_classic() - return(plot) -} - -#plot all max effects in one plot - -plot_max_eff <- function(results_MCP, results_Bay, scenario){ - -transformed_MCP <- t(results_MCP) -transformed_MCP <- as.data.table(transformed_MCP) -transformed_MCP <- transformed_MCP[-c(1),] - -data <- data.table( - Bay_0.05 = results_Bay$Bay_0.05, - Bay_0.1 = results_Bay$Bay_0.1, - Bay_0.2 = results_Bay$Bay_0.2, - Bay_0.3 = results_Bay$Bay_0.3, - Bay_0.5 = results_Bay$Bay_0.5, - MCP_0.05 = transformed_MCP$V1, - MCP_0.1 = transformed_MCP$V2, - MCP_0.2 = transformed_MCP$V3, - MCP_0.3 = transformed_MCP$V4, - MCP_0.5 = transformed_MCP$V5, - scenario = c(scenario, "average") -) - - - - - plot_max_eff <- ggplot(data = data)+ - geom_point(mapping = aes(x= data$scenario, y = data$Bay_0.05, colour = "MCPMod", shape = "0.05"), data = data, position = position_nudge(x = -0.15))+ - geom_point(mapping = aes(x= data$scenario, y = data$MCP_0.05, colour = "BayesianMCPMod", shape = "0.05"), data = data, position = position_nudge(x = 0.15))+ - geom_point(mapping = aes(x= data$scenario, y = data$MCP_0.1, colour = "MCPMod", shape = "0.1"), data = data, position = position_nudge(x = -0.15))+ - geom_point(mapping = aes(x= scenario, y = data$Bay_0.1, colour = "BayesianMCPMod", shape = "0.1"), data = data, position = position_nudge(x = 0.15))+ - geom_point(mapping = aes(x= scenario, y = data$MCP_0.2, colour = "MCPMod", shape = "0.2"), data = data, position = position_nudge(x = -0.15))+ - geom_point(mapping = aes(x= scenario, y = data$Bay_0.2, colour = "BayesianMCPMod", shape = "0.2"), data = data, position = position_nudge(x = 0.15))+ - geom_point(mapping = aes(x= scenario, y = data$MCP_0.3, colour = "MCPMod", shape = "0.3"), data = data, position = position_nudge(x = -0.15))+ - geom_point(mapping = aes(x= scenario, y = data$Bay_0.3, colour = "BayesianMCPMod", shape = "0.3"), data = data, position = position_nudge(x = 0.15))+ - geom_point(mapping = aes(x= scenario, y = data$MCP_0.5, colour = "MCPMod", shape = "0.5"), data = data, position = position_nudge(x = -0.15))+ - geom_point(mapping = aes(x= scenario, y = data$Bay_0.5, colour = "BayesianMCPMod", shape = "0.5"), data = data, position = position_nudge(x = 0.15))+ - scale_color_manual(name = "Legend", values = c("MCPMod" = 'blue', "BayesianMCPMod" = "red"))+ - scale_shape_manual(name = "Expected effects", values = c("0.05" = 0, 
"0.1" = 1, "0.2" = 2, "0.3" = 3, "0.5" = 5))+ - ylim(0, 1)+ - ylab("power")+ - xlab("assumed true model")+ - labs(title = "Power for different models and differen exptected effects for maximum dose")+ - theme_bw() - - plot_max_eff - -} - - - -# parallel---------------------------------------------- -chunkVector <- function (x, n_chunks) { - if (n_chunks <= 1) { - chunk_list <- list(x) - } else { - chunk_list <- unname(split(x, cut(seq_along(x), n_chunks, labels = FALSE))) - } - return(chunk_list) -} - -devtools::load_all() -#library(BayesianMCPMod) -library(RBesT) -library(clinDR) -library(dplyr) -library(tibble) -library(reactable) -library(DoseFinding) -library(MCPModPack) -library(kableExtra) -library(data.table) -library(doFuture) -library(doRNG) - -registerDoFuture() -plan(multisession) - - - -set.seed(7015) -``` - -# Introduction - -This vignette demonstrates the application of the {BayesianMCPMod} -package for sample size calculations and the comparison with the -{MCPModPack} package. As for other bayesian approaches BMCPMod is able -to mimic the results of the frequentist MCPMod for non-informative -priors and this is shown here for the sample size planning of different -scenarios. - -In order to test and compare BayesianMCPMod and MCPModPack sample size -calculations in different scenarios, the data used for the comparisons -will be simulated multiple times using different input values. Every -scenario will be named to make the differentiation between each scenario -simpler. Altogether, four different scenarios were simulated. The first -three scenarios are pretty similiar, they only differ in the pre-defined -set of candidate models M. The other design choices, like the number of -dose levels or sample size allocation stay the same between the -scenarios. For the first three scenarios, these design choices are: - -- four dose levels plus placebo (0 mg, 1 mg, 2 mg, 4 mg, 8 mg) - -- total sample size of N = 200 - -- equal allocation ratio for each dose group - -- expected effect for maximum dose of 0.2 mg - -- standard deviation of 0.4 for every dose group - -The fourth scenario, the variability scenario, has some different design -choices: - three dose levels plus placebo (0 mg, 1 mg, 4 mg, 8 mg) - -total sample size of N = 100 - equal allocation ratio for each dose -group - expected effect for maximum dose of 0.2 mg - standard deviation -of 0.4 for every dose group - -Building on the initial scenarios, we further delve into the exploration -of varying input parameters. The varying parameters are: - -- expected effect for maximum dose of (0.05 mg, 0.1 mg, 0.2 mg, 0.3 - mg, 0.5 mg) - -- different total sample size with different allocation: - -- (40,20,20,20,40), (30,30,30,30,30), (48,24,24,24,48), - (36,36,36,36,36) (for the first three scenarios) - -- (40,20,20,40), (30,30,30,30), (48,24,24,48), (36,36,36,36) (for the - variability scenario) - -In a subsequent step, we extend our analysis to test the convergence of -power values as the number of simulations increases. For this part of -the study, we assume an expected effect for the maximum dose of 0.2 mg. -The sample size is set to 200 for the linear, monotonic and -non-monotonic scenario and 100 for the variability scenario. This -further exploration allows us to deepen our understanding of the -BayesianMCPMod and MCPModPack packages and their performance under -varying conditions. 
- -```{r} -####### General assumptions ######### -# simulations -n_sim <- 1000 - -# Define the list of sample sizes -nsample_list = list(20*c(2,1,1,1,2),30*c(1,1,1,1,1), 24*c(2,1,1,1,2),36*c(1,1,1,1,1)) -nsample_vector <- c("(40,20,20,20,40)", "(30,30,30,30,30)", "(48,24,24,24,48)", "(36,36,36,36,36)") - -# define input parameters -doses.sim <- c(0,1,2,4,8) # dose levels -max.dose <- max(doses.sim) # specify max dose possibly available -plc.guess <- 0 # expected placebo effect -Nsample <-c(40,40,40,40,40) #,36*c(1,1,1,1)) #list(20*c(2,1,1,2),30*c(1,1,1,1), 24*c(2,1,1,2),36*c(1,1,1,1)) -expectedEffect_fix <- 0.2 #c(0.05,0.1,0.2,0.3,0.5) -expectedEffect <- c(0.05,0.1,0.2,0.3,0.5) -sd.sim <- c(0.4) - -#input paramters variability scenario -doses_var <- c(0, 1, 4, 8) -sd.sim_var <- 0.4 -Nsample_var <- c(25, 25, 25, 25) -nsample_list_var <- list(20*c(2,1,1,2),30*c(1,1,1,1), 24*c(2,1,1,2),36*c(1,1,1,1)) - - -# define model functions -emax.g <- guesst(d = doses.sim[2], p = 0.6, "emax") -exp.g <- guesst(d=doses.sim[2],p=0.05,model="exponential", Maxd=max.dose) -logit.g <- guesst(d=c(doses.sim[2],doses.sim[3]),p=c(0.1,0.9),"logistic",Maxd=max.dose) -sigEmax.g <- guesst(d=c(doses.sim[2],doses.sim[3]),p=c(0.15,0.75), model = "sigEmax") -quad.g <- guesst(d=doses.sim[2],p=0.35,model="quadratic") -beta.g <- guesst(d=doses.sim[2],p=0.2,model="betaMod", dMax=5.5, scal=9.6, Maxd=max.dose) - -### define models to be tested in MCPMod -models <- Mods(linear=NULL, - emax = emax.g, - exponential = exp.g, - logistic=logit.g, - sigEmax = sigEmax.g, - quadratic = quad.g, - betaMod = beta.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing", - addArgs = list(scal=9.6) -) - -alpha <- 0.05 - -# display_params_table(models) - - -# kable(t(as.data.table(cbind(doses.sim, Nsample))))%>% -# kable_classic(full_width = TRUE)%>% -# add_header_above(c( "Doses" = 6), font_size = 15, bold = TRUE) -# -# -# kable(t(data.table(placebo_guess = plc.guess, -# sd = sd.sim[1], -# alpha = alpha )))%>% -# kable_classic(full_width = TRUE) - -``` - -## Scenarios - -In the following the candidate models for the different scenarios are -plotted. 
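-
-The numeric standardized-model parameters that will be passed to MCPModPack
-later on (e.g. emax = 0.6666667 and exponential = 4.447149) are the values
-implied by the guesstimates defined above. The following lines, added purely
-as an illustrative cross-check, should reproduce them approximately:
-
-```{r}
-# cross-check of the guesstimate-derived parameters reused for MCPModPack
-DoseFinding::guesst(d = 1, p = 0.6, model = "emax")                    # ED50, ~0.6666667
-DoseFinding::guesst(d = 1, p = 0.05, model = "exponential", Maxd = 8)  # delta, ~4.447149
-```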
- -```{r} - -#Specification of considered models for different scenarios for BayesianMCPMod - -min_scenario <- c("linear", "exponential", "emax") -min_models <- Mods(linear=NULL, - exponential = exp.g, - emax = emax.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing" -) -plot(min_models, main = "minimal Scenario") - - -monotonic_scenario <- c("linear", "exponential", "emax", "logistic", "sigEmax") - -monotonic_models <- Mods(linear=NULL, - exponential = exp.g, - emax = emax.g, - logistic=logit.g, - sigEmax = sigEmax.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing" -) -plot(monotonic_models, main = "monotonic Scenario") - - -non_monotonic_scenario <- c("linear", "emax", "sigEmax", "quadratic", "betaMod") - -non_monotonic_models <- Mods(linear=NULL, - emax = emax.g, - sigEmax = sigEmax.g, - quadratic = quad.g, - betaMod = beta.g, - doses = doses.sim, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing", - addArgs = list(scal=9.6) -) -plot(non_monotonic_models,main = "non monotonic Scenario") - - -variability_scenario <- c("linear","exponential", "emax") - -kable(t(as.data.table(cbind(doses_var, Nsample_var))))%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c( "Doses variability scenario" = 5), font_size = 15) - -var_models <- Mods(linear=NULL, - exponential = exp.g, - emax = emax.g, - doses = doses_var, - placEff = plc.guess, - maxEff = expectedEffect_fix, - direction = "increasing" -) -plot(var_models, main = "variability Scenario") - -``` - -# MCPModPack - -```{r} - -#Specification of considered models for different scenarios for MCPModPack - -min_modelsPack = list(linear = NA, - exponential = 4.447149, - emax = 0.6666667) - -monotonic_modelsPack = list(linear = NA, - exponential = 4.447149, - emax = 0.6666667, - logistic = c(1.5, 0.2275598), - sigEmax = c(1.528629, 4.087463)) - -non_monotonic_modelsPack = list(linear = NA, - emax = 0.6666667 , - sigEmax = c(1.528629, 4.087463), - quadratic = -0.09688711) -#beta - -var_modelsPack = list(linear = NA, - exponential = 4.447149, - emax = 0.6666667, - logistic = c(1.5, 0.2275598), - sigEmax = c(1.528629, 4.087463)) - -``` - -```{r child = 'Comparison_MCPModPack.qmd'} - -``` - -# BayesianMCPMod - -```{r child = 'Comparison_BayesianMCPMod.qmd'} - -``` - -# Comparison - -In the following, we will draw comparisons between the power values of -various scenarios and differnt parameters. 
- -## varying expected effect for maximum dose - -### minimal scenario - -```{r} - -plot_minimal_max_eff <- plot_max_eff(results_min_MCP, results_min_Bay, min_scenario) -plot_minimal_max_eff - -#table with results -kable(results_min_MCP)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different expected effects " = 5), font_size = 15, bold = TRUE)%>% - add_header_above(c("MCPModPack " = 5), font_size = 15, bold = TRUE) - -minimal_Bay$kable_result - -#difference of power values -results_min_max_eff <- data.table( - linear = as.vector(minimal_Bay$result_table$linear)-as.vector(results_min_MCP$linear), - exponential = as.vector(minimal_Bay$result_table$exponential)-as.vector(results_min_MCP$exponential), - emax = as.vector(minimal_Bay$result_table$emax)-as.vector(results_min_MCP$emax), - expectedEffect = expectedEffect -) - - - -results_min_max_eff <- melt(results_min_max_eff, id.vars = "expectedEffect") - -plot_minimal <- plot_power_deviation(results_min_max_eff, results_min_max_eff$expectedEffect, "assumed maximum effect", c(0.05,0.1,0.2,0.3,0.5)) -plot_minimal -``` - -```{r} - -``` - -### monotonic scenario - -```{r} -plot_monotonic_max_eff <- plot_max_eff(results_monotonic_MCP, results_monotonic_Bay, monotonic_scenario) -plot_monotonic_max_eff - -#table with results -kable(results_monotonic_MCP)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different expected effects " = 7), font_size = 15, bold = TRUE)%>% - add_header_above(c("MCPModPack " = 7), font_size = 15, bold = TRUE) - -monotonic_Bay$kable_result - - -results_monotonic_max_eff <- data.table( - linear = as.vector(monotonic_Bay$result_table$linear)-as.vector(results_monotonic_MCP$linear), - exponential = as.vector(monotonic_Bay$result_table$exponential)-as.vector(results_monotonic_MCP$exp), - emax = as.vector(monotonic_Bay$result_table$emax)-as.vector(results_monotonic_MCP$emax), - logistic = as.vector(monotonic_Bay$result_table$logistic)-as.vector(results_monotonic_MCP$logistic), - sigemax = as.vector(monotonic_Bay$result_table$sigEmax)-as.vector(results_monotonic_MCP$sigEmax), - expectedEffect = expectedEffect -) - -results_monotonic_max_eff <- melt(results_monotonic_max_eff, id.vars = "expectedEffect") - -plot_monotonic <- plot_power_deviation(results_monotonic_max_eff, results_monotonic_max_eff$expectedEffect, "assumed maximum effect", c(0.05,0.1,0.2,0.3,0.5)) -plot_monotonic -``` - -### non - monotonic scenario - -```{r} -plot_non_monotonic_max_eff <- plot_max_eff(results_non_monotonic_MCP, results_non_monotonic_Bay, non_monotonic_scenario) -plot_non_monotonic_max_eff - -# table results -kable(results_non_monotonic_MCP)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different expected effects" = 7), font_size = 15, bold = TRUE)%>% - add_header_above(c("MCPModPack" = 7), font_size = 15, bold = TRUE) - -non_monotonic_Bay$kable_result - -results_non_monotonic_max_eff <- data.table( - linear = as.vector(non_monotonic_Bay$result_table$linear)-as.vector(results_non_monotonic_MCP$linear), - emax = as.vector(non_monotonic_Bay$result_table$emax)-as.vector(results_non_monotonic_MCP$emax), - sigemax = as.vector(non_monotonic_Bay$result_table$sigEmax)-as.vector(results_non_monotonic_MCP$sigEmax), - quadratic = as.vector(non_monotonic_Bay$result_table$quadratic)-as.vector(results_non_monotonic_MCP$quadratic), - beta = as.vector(non_monotonic_Bay$result_table$beta)-as.vector(results_non_monotonic_MCP$beta), - expectedEffect = expectedEffect - ) - 
-results_non_monotonic_max_eff <- melt(results_non_monotonic_max_eff, id.vars = "expectedEffect") - -plot_non_monotonic <- plot_power_deviation(results_non_monotonic_max_eff, results_non_monotonic_max_eff$expectedEffect, "assumed maximum effect", c(0.05,0.1,0.2,0.3,0.5)) -plot_non_monotonic -``` - -### variability scenario - -```{r} -plot_var_max_eff <- plot_max_eff(results_var_MCP, results_variability_Bay, variability_scenario) -plot_var_max_eff - -#table with results -kable(results_var_MCP)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different expected effects" = 5), font_size = 15, bold = TRUE)%>% - add_header_above(c("MCPModPack" = 5), font_size = 15, bold = TRUE) - -variability_Bay$kable_result - -results_var_max_eff <- data.table( - linear = as.vector(variability_Bay$result_table$linear)-as.vector(results_var_MCP$linear), - exponential = as.vector(variability_Bay$result_table$exponential)-as.vector(results_var_MCP$exponential), - emax = as.vector(variability_Bay$result_table$emax)-as.vector(results_var_MCP$emax), - expectedEffect = expectedEffect -) - -results_var_max_eff <- melt(results_var_max_eff, id.vars = "expectedEffect") - -plot_var <- plot_power_deviation(results_var_max_eff, results_var_max_eff$expectedEffect, "assumed maximum effect", c(0.05,0.1,0.2,0.3,0.5)) -plot_var -``` - -## varying sample size - -```{r} -nsample_vector <- as.vector(c("(40, 20, 20, 20, 40)", "(30, 30, 30, 30, 30)", "(48, 24, 24, 24, 48)", "(36, 36, 36, 36, 36)")) -``` - -### minimal scenario - -```{r} - -# results_min_nsample <- data.table( -# linear = as.vector(minimal_nsample_Bay$result_table$linear)-as.vector(results_min_MCP_nsample$Linear), -# exponential = as.vector(minimal_nsample_Bay$result_table$exponential)-as.vector(results_min_MCP_nsample$Exponential), -# emax = as.vector(minimal_nsample_Bay$result_table$emax)-as.vector(results_min_MCP_nsample$Emax), -# nsample_vector = nsample_vector -# ) -# -# results_min_nsample <- melt(results_min_nsample, id.vars = "nsample_vector") -# -# plot_minimal_nsample <- plot_power_deviation(results_min_nsample, results_min_nsample$nsample_vector, "sample sizes") -# plot_minimal_nsample - -kable(results_min_MCP_nsample)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different sample sizes " = 5), font_size = 15, bold = TRUE)%>% - add_header_above(c("MCPModPack" = 5), font_size = 15, bold = TRUE) - -minimal_nsample_Bay$kable_result - - - - -data_plot_nsample_min <- data.frame( - sample_sizes = nsample_vector, - sample_sizes_num = c(1, 2, 3, 4), - start_linear = minimal_nsample_Bay$result_table$linear, - end_linear = results_min_MCP_nsample$Linear, - start_exp = minimal_nsample_Bay$result_table$exponential, - end_exp = results_min_MCP_nsample$Exponential, - start_emax = minimal_nsample_Bay$result_table$emax, - end_emax = results_min_MCP_nsample$Emax -) - - -# Create the plot -ggplot(data = data_plot_nsample_min, aes(x = sample_sizes_num)) + - geom_segment(aes(x = sample_sizes_num - 0.3, xend = sample_sizes_num - 0.3, y = start_linear, yend = end_linear, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num - 0.4, xend = sample_sizes_num - 0.2 , y = start_linear, yend = start_linear, color = "linear"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num - 0.05, xend = sample_sizes_num - 0.05 , y = start_exp, yend = end_exp, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num - 0.15, xend = sample_sizes_num + 0.05, y = start_exp, yend 
= start_exp, color = "exponential"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num + 0.2, xend = sample_sizes_num + 0.2, y = start_emax, yend = end_emax, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num + 0.1, xend = sample_sizes_num + 0.3, y = start_emax, yend = start_emax, color = "emax"), size = 0.5) + - scale_x_continuous(breaks = data_plot_nsample_min$sample_sizes_num, labels = data_plot_nsample_min$sample_sizes) + - scale_color_manual(name = "Assumed true model (BayesianMCPMod)", values = c("linear" = "red", "exponential" = "blue", "emax" = "darkgreen", "logistic" = "orange", "sigemax" = "purple", "beta" = "deepskyblue", "quadratic" = "deeppink", "MCPMod power deviation" = "grey"))+ - theme_minimal() + - ylab("Power") + - ylim(c(0.25,1))+ - xlab("sample sizes")+ - ggtitle("Power values different sample sizes") + - geom_vline(xintercept = data_plot_nsample_min$sample_sizes_num + 0.5, linetype="dashed", color = "black") + - theme(legend.position = "bottom") - - - - -``` - -### monotonic scenario - -```{r} -# results_monotonic_nsample <- data.table( -# linear = as.vector(monotonic_nsample_Bay$result_table$linear)-as.vector(results_monotonic_MCP_nsample$Linear), -# exponential = as.vector(monotonic_nsample_Bay$result_table$exponential)-as.vector(results_monotonic_MCP_nsample$Exponential), -# emax = as.vector(monotonic_nsample_Bay$result_table$emax)-as.vector(results_monotonic_MCP_nsample$Emax), -# logistic = as.vector(monotonic_nsample_Bay$result_table$logistic)-as.vector(results_monotonic_MCP_nsample$Logistic), -# sigemax = as.vector(monotonic_nsample_Bay$result_table$sigEmax)-as.vector(results_monotonic_MCP_nsample$sigEmax), -# nsample_vector = nsample_vector -# ) -# -# results_monotonic_nsample <- melt(results_monotonic_nsample, id.vars = "nsample_vector") -# -# plot_monotonic_nsample <- plot_power_deviation(results_monotonic_nsample, results_monotonic_nsample$nsample_vector, "sample sizes") -# plot_monotonic_nsample - - -kable(results_monotonic_MCP_nsample)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different sample sizes" = 7), font_size = 15, bold = TRUE)%>% - add_header_above(c("MCPModPack" = 7), font_size = 15, bold = TRUE) - -monotonic_nsample_Bay$kable_result - -data_plot_nsample_monotonic <- data.frame( - sample_sizes = nsample_vector, - sample_sizes_num = c(1, 2, 3, 4), - start_linear = monotonic_nsample_Bay$result_table$linear, - end_linear = results_monotonic_MCP_nsample$Linear, - start_exp = monotonic_nsample_Bay$result_table$exponential, - end_exp = results_monotonic_MCP_nsample$Exponential, - start_emax = monotonic_nsample_Bay$result_table$emax, - end_emax = results_monotonic_MCP_nsample$Emax, - start_logistic = monotonic_nsample_Bay$result_table$logistic, - end_logistic = results_monotonic_MCP_nsample$Logistic, - start_sigemax = monotonic_nsample_Bay$result_table$sigEmax, - end_sigemax = results_monotonic_MCP_nsample$sigEmax -) - - -# Create the plot -ggplot(data = data_plot_nsample_monotonic, aes(x = sample_sizes_num)) + - geom_segment(aes(x = sample_sizes_num - 0.4, xend = sample_sizes_num - 0.4, y = start_linear, yend = end_linear, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num - 0.45, xend = sample_sizes_num - 0.35 , y = start_linear, yend = start_linear, color = "linear"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num - 0.2, xend = sample_sizes_num - 0.2 , y = start_exp, yend = end_exp, color = "MCPMod power deviation"), size = 3) + - 
geom_segment(aes(x = sample_sizes_num - 0.25, xend = sample_sizes_num - 0.15, y = start_exp, yend = start_exp, color = "exponential"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num, xend = sample_sizes_num, y = start_emax, yend = end_emax, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num - 0.05, xend = sample_sizes_num + 0.05, y = start_emax, yend = start_emax, color = "emax"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num + 0.2, xend = sample_sizes_num + 0.2, y = start_logistic, yend = end_logistic, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num + 0.15, xend = sample_sizes_num + 0.25, y = start_logistic, yend = start_logistic, color = "logistic"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num + 0.4, xend = sample_sizes_num + 0.4, y = start_sigemax, yend = end_sigemax, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num + 0.35, xend = sample_sizes_num + 0.45, y = start_sigemax, yend = start_sigemax, color = "sigemax"), size = 0.5) + - scale_x_continuous(breaks = data_plot_nsample_monotonic$sample_sizes_num, labels = data_plot_nsample_monotonic$sample_sizes) + - scale_color_manual(name = "Assumed true model (BayesianMCPMod)", values = c("linear" = "red", "exponential" = "blue", "emax" = "darkgreen", "logistic" = "orange", "sigemax" = "purple", "beta" = "deepskyblue", "quadratic" = "deeppink", "MCPMod power deviation" = "grey"))+ - theme_minimal() + - ylab("Power") + - ylim(c(0.25,1))+ - xlab("sample sizes")+ - ggtitle("Power values different sample sizes")+ - geom_vline(xintercept = data_plot_nsample_min$sample_sizes_num + 0.5, linetype="dashed", color = "black") + - theme(legend.position = "bottom") - - -``` - -### non-monotonic scenario - -```{r} -# results_non_monotonic_nsample <- data.table( -# linear = as.vector(non_monotonic_nsample_Bay$result_table$linear)-as.vector(results_non_monotonic_MCP_nsample$Linear), -# emax = as.vector(non_monotonic_nsample_Bay$result_table$emax)-as.vector(results_non_monotonic_MCP_nsample$Emax), -# sigemax = as.vector(non_monotonic_nsample_Bay$result_table$sigEmax)-as.vector(results_non_monotonic_MCP_nsample$sigEmax), -# quadratic = as.vector(non_monotonic_nsample_Bay$result_table$quadratic)-as.vector(results_non_monotonic_MCP_nsample$quadratic), -# beta = as.vector(non_monotonic_nsample_Bay$result_table$betaMod) - as.vector(results_non_monotonic_MCP_nsample$beta), -# nsample_vector = nsample_vector -# ) -# -# results_non_monotonic_nsample <- melt(results_non_monotonic_nsample, id.vars = "nsample_vector") -# -# plot_non_monotonic_nsample <- plot_power_deviation(results_non_monotonic_nsample, results_non_monotonic_nsample$nsample_vector, "sample sizes") -# plot_non_monotonic_nsample - -kable(results_non_monotonic_MCP_nsample)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different sample sizes" = 6), font_size = 15, bold = TRUE)%>% - add_header_above(c("MCPModPack" = 6), font_size = 15, bold = TRUE) - -non_monotonic_nsample_Bay$kable_result - -data_plot_nsample_non_monotonic <- data.frame( - sample_sizes = nsample_vector, - sample_sizes_num = c(1, 2, 3, 4), - start_linear = non_monotonic_nsample_Bay$result_table$linear, - end_linear = results_non_monotonic_MCP_nsample$Linear, - start_emax = non_monotonic_nsample_Bay$result_table$emax, - end_emax = results_non_monotonic_MCP_nsample$Emax, - start_sigemax = non_monotonic_nsample_Bay$result_table$sigEmax, - end_sigemax = 
results_non_monotonic_MCP_nsample$sigEmax, - start_quadratic = non_monotonic_nsample_Bay$result_table$quadratic, - end_quadratic = results_non_monotonic_MCP_nsample$quadratic, - start_beta = non_monotonic_nsample_Bay$result_table$betaMod, - end_beta = results_non_monotonic_MCP_nsample$beta -) - - -# Create the plot -ggplot(data = data_plot_nsample_non_monotonic, aes(x = sample_sizes_num)) + - geom_segment(aes(x = sample_sizes_num - 0.4, xend = sample_sizes_num - 0.4, y = start_linear, yend = end_linear, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num - 0.45, xend = sample_sizes_num - 0.35 , y = start_linear, yend = start_linear, color = "linear"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num - 0.2, xend = sample_sizes_num - 0.2 , y = start_emax, yend = end_emax, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num - 0.25, xend = sample_sizes_num - 0.15, y = start_emax, yend = start_emax, color = "emax"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num, xend = sample_sizes_num, y = start_sigemax, yend = end_sigemax, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num - 0.05, xend = sample_sizes_num + 0.05, y = start_sigemax, yend = start_sigemax, color = "sigemax"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num + 0.1, xend = sample_sizes_num + 0.1, y = start_quadratic, yend = end_quadratic, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num + 0.05, xend = sample_sizes_num + 0.15, y = start_quadratic, yend = start_quadratic, color = "quadratic"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num + 0.3, xend = sample_sizes_num + 0.3, y = start_beta, yend = end_beta, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num + 0.25, xend = sample_sizes_num + 0.35, y = start_beta, yend = start_beta, color = "beta"), size = 0.5) + - scale_x_continuous(breaks = data_plot_nsample_non_monotonic$sample_sizes_num, labels = data_plot_nsample_non_monotonic$sample_sizes) + - scale_color_manual(name = "Assumed true model (BayesianMCPMod)", values = c("linear" = "red", "exponential" = "blue", "emax" = "darkgreen", "logistic" = "orange", "sigemax" = "purple", "beta" = "deepskyblue", "quadratic" = "deeppink", "MCPMod power deviation" = "grey"))+ - theme_minimal() + - ylab("Power") + - ylim(c(0.25,1))+ - xlab("sample sizes")+ - ggtitle("Power values different sample sizes")+ - geom_vline(xintercept = data_plot_nsample_min$sample_sizes_num + 0.5, linetype="dashed", color = "black") + - theme(legend.position = "bottom") - - - -``` - -### variability sceanrio - -```{r} -# results_var_nsample <- data.table( -# linear = as.vector(var_nsample_Bay$result_table$linear)-as.vector(results_var_MCP_nsample$Linear), -# exponential = as.vector(var_nsample_Bay$result_table$exponential)-as.vector(results_var_MCP_nsample$Exponential), -# emax = as.vector(var_nsample_Bay$result_table$emax)-as.vector(results_var_MCP_nsample$Emax), -# nsample_vector = nsample_vector -# ) -# -# results_var_nsample <- melt(results_var_nsample, id.vars = "nsample_vector") -# -# -# plot_var_nsample <- plot_power_deviation(results_var_nsample, results_var_nsample$nsample_vector, "sample sizes") -# plot_var_nsample - -kable(results_var_MCP_nsample)%>% - kable_classic(full_width = TRUE)%>% - add_header_above(c("Power results different sample sizes" = 5), font_size = 15, bold = TRUE)%>% - add_header_above(c("Variability scenario" = 5), font_size = 15, bold 
= TRUE) - -var_nsample_Bay$kable_result - - -data_plot_nsample_var <- data.frame( - sample_sizes = nsample_vector, - sample_sizes_num = c(1, 2, 3, 4), - start_linear = var_nsample_Bay$result_table$linear, - end_linear = results_var_MCP_nsample$Linear, - start_exp = var_nsample_Bay$result_table$exponential, - end_exp = results_var_MCP_nsample$Exponential, - start_emax = var_nsample_Bay$result_table$emax, - end_emax = results_var_MCP_nsample$Emax -) - - -# Create the plot -ggplot(data = data_plot_nsample_var, aes(x = sample_sizes_num)) + - geom_segment(aes(x = sample_sizes_num - 0.3, xend = sample_sizes_num - 0.3, y = start_linear, yend = end_linear, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num - 0.4, xend = sample_sizes_num - 0.2 , y = start_linear, yend = start_linear, color = "linear"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num - 0.05, xend = sample_sizes_num - 0.05, y = start_exp, yend = end_exp, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num - 0.15, xend = sample_sizes_num + 0.05 , y = start_exp, yend = start_exp, color = "exponential"), size = 0.5) + - geom_segment(aes(x = sample_sizes_num + 0.2, xend = sample_sizes_num + 0.2, y = start_emax, yend = end_emax, color = "MCPMod power deviation"), size = 3) + - geom_segment(aes(x = sample_sizes_num + 0.1, xend = sample_sizes_num + 0.3 , y = start_emax, yend = start_emax, color = "emax"), size = 0.5) + - scale_x_continuous(breaks = data_plot_nsample_var$sample_sizes_num, labels = data_plot_nsample_var$sample_sizes) + - scale_color_manual(name = "Assumed true model (BayesianMCPMod)", values = c("linear" = "red", "exponential" = "blue", "emax" = "darkgreen", "logistic" = "orange", "sigemax" = "purple", "beta" = "deepskyblue", "quadratic" = "deeppink", "MCPMod power deviation" = "grey"))+ - theme_minimal() + - ylab("Power") + - ylim(c(0.25,1))+ - xlab("sample sizes")+ - ggtitle("Power values different sample sizes")+ - geom_vline(xintercept = data_plot_nsample_min$sample_sizes_num + 0.5, linetype="dashed", color = "black") + - theme(legend.position = "bottom") - -``` - -## convergence of power values - -In the following simulations, we examine the convergence of power values for an increasing number of simulations. We are considering the following number of simulations: 100, 500, 1000, 2500, 5000. 
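-
-As a rough yardstick for how closely the two implementations can be expected
-to agree, the Monte Carlo standard error of a simulated power value p based on
-n_sim trials is sqrt(p * (1 - p) / n_sim). A small sketch, added for
-illustration only:
-
-```{r}
-# Monte Carlo standard error of a simulated power estimate
-mc_se <- function(p, n_sim) sqrt(p * (1 - p) / n_sim)
-round(sapply(c(100, 500, 1000, 2500, 5000), mc_se, p = 0.8), 4)
-# approx. 0.04 at n_sim = 100 down to approx. 0.006 at n_sim = 5000 (for p = 0.8)
-```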
- -```{r child = 'Comparison_convergence.qmd'} - -``` - -### minimal scenario - - -```{r} -#safe results in data.table for plot -results_nsim <- data.table( - MCP_linear = c(results_list_nsim_MCP[[1]]$sim_results$power, results_list_nsim_MCP[[2]]$sim_results$power, results_list_nsim_MCP[[3]]$sim_results$power, results_list_nsim_MCP[[4]]$sim_results$power, results_list_nsim_MCP[[5]]$sim_results$power), - MCP_exp = c(results_list_nsim_MCP[[6]]$sim_results$power, results_list_nsim_MCP[[7]]$sim_results$power, results_list_nsim_MCP[[8]]$sim_results$power, results_list_nsim_MCP[[9]]$sim_results$power, results_list_nsim_MCP[[10]]$sim_results$power), - MCP_emax = c(results_list_nsim_MCP[[11]]$sim_results$power, results_list_nsim_MCP[[12]]$sim_results$power, results_list_nsim_MCP[[13]]$sim_results$power, results_list_nsim_MCP[[14]]$sim_results$power, results_list_nsim_MCP[[15]]$sim_results$power), - Bay_linear = results_nsim_Bay$Bay_linear, - Bay_exp = results_nsim_Bay$Bay_exponential, - Bay_emax = results_nsim_Bay$Bay_emax) - -results_nsim_diff <- data.table( - linear = results_nsim$Bay_linear - results_nsim$MCP_linear, - exponential = results_nsim$Bay_exp - results_nsim$MCP_exp, - emax = results_nsim$Bay_emax - results_nsim$MCP_emax, - n_sim = n_sim_values -) - -results_nsim_diff <- melt(results_nsim_diff, id.vars = "n_sim") - -plot_nsim <- plot_power_deviation(results_nsim_diff, results_nsim_diff$n_sim, "nsim", c(100, 500, 1000, 2500, 5000)) -plot_nsim -``` - -### monotonic scenario - - - -```{r} -#safe results in data.table for plot -results_nsim_monotonic <- data.table( - MCP_linear = c(results_list_nsim_MCP_monotonic[[1]]$sim_results$power, results_list_nsim_MCP_monotonic[[2]]$sim_results$power, results_list_nsim_MCP_monotonic[[3]]$sim_results$power, results_list_nsim_MCP_monotonic[[4]]$sim_results$power, results_list_nsim_MCP_monotonic[[5]]$sim_results$power), - MCP_exp = c(results_list_nsim_MCP_monotonic[[6]]$sim_results$power, results_list_nsim_MCP_monotonic[[7]]$sim_results$power, results_list_nsim_MCP_monotonic[[8]]$sim_results$power, results_list_nsim_MCP_monotonic[[9]]$sim_results$power, results_list_nsim_MCP_monotonic[[10]]$sim_results$power), - MCP_emax = c(results_list_nsim_MCP_monotonic[[11]]$sim_results$power, results_list_nsim_MCP_monotonic[[12]]$sim_results$power, results_list_nsim_MCP_monotonic[[13]]$sim_results$power, results_list_nsim_MCP_monotonic[[14]]$sim_results$power, results_list_nsim_MCP_monotonic[[15]]$sim_results$power), - MCP_logistic = c(results_list_nsim_MCP_monotonic[[16]]$sim_results$power, results_list_nsim_MCP_monotonic[[17]]$sim_results$power, results_list_nsim_MCP_monotonic[[18]]$sim_results$power, results_list_nsim_MCP_monotonic[[19]]$sim_results$power, results_list_nsim_MCP_monotonic[[20]]$sim_results$power), - MCP_sigemax = c(results_list_nsim_MCP_monotonic[[21]]$sim_results$power, results_list_nsim_MCP_monotonic[[22]]$sim_results$power, results_list_nsim_MCP_monotonic[[23]]$sim_results$power, results_list_nsim_MCP_monotonic[[24]]$sim_results$power, results_list_nsim_MCP_monotonic[[25]]$sim_results$power), - Bay_linear = results_nsim_Bay_monotonic$Bay_linear, - Bay_exp = results_nsim_Bay_monotonic$Bay_exponential, - Bay_emax = results_nsim_Bay_monotonic$Bay_emax, - Bay_logistic = results_nsim_Bay_monotonic$Bay_logistic, - Bay_sigemax = results_nsim_Bay_monotonic$Bay_sigEmax) - -results_nsim_diff_monotonic <- data.table( - linear = results_nsim_monotonic$Bay_linear - results_nsim_monotonic$MCP_linear, - exponential = results_nsim_monotonic$Bay_exp - 
results_nsim_monotonic$MCP_exp, - emax = results_nsim_monotonic$Bay_emax - results_nsim_monotonic$MCP_emax, - logistic = results_nsim_monotonic$Bay_logistic - results_nsim_monotonic$MCP_logistic, - sigemax = results_nsim_monotonic$Bay_sigemax - results_nsim_monotonic$MCP_sigemax, - n_sim = n_sim_values -) - -results_nsim_diff_monotonic <- melt(results_nsim_diff_monotonic, id.vars = "n_sim") - -plot_nsim_monotonic <- plot_power_deviation(results_nsim_diff_monotonic, results_nsim_diff_monotonic$n_sim, "nsim", c(100, 500, 1000, 2500, 5000)) -plot_nsim_monotonic -``` - -### non - monotonic scenario - - - -```{r} -#safe results in data.table for plot -results_nsim_non_monotonic <- data.table( - MCP_linear = c(results_list_nsim_MCP_non_monotonic[[1]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[2]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[3]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[4]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[5]]$sim_results$power), - MCP_emax = c(results_list_nsim_MCP_non_monotonic[[6]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[7]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[8]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[9]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[10]]$sim_results$power), - MCP_sigemax = c(results_list_nsim_MCP_non_monotonic[[11]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[12]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[13]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[14]]$sim_results$power, results_list_nsim_MCP_non_monotonic[[15]]$sim_results$power), - #MCP_quadratic = results_list_nsim_MCP_non_monotonic$power_non_monotonic_nsim_sigemax, - Bay_linear = results_nsim_Bay_non_monotonic$Bay_linear, - Bay_emax = results_nsim_Bay_monotonic$Bay_emax, - Bay_sigemax = results_nsim_Bay_non_monotonic$Bay_sigEmax, - Bay_quadratic = results_nsim_Bay_non_monotonic$Bay_quadratic) - -results_nsim_diff_non_monotonic <- data.table( - linear = results_nsim_non_monotonic$Bay_linear - results_nsim_non_monotonic$MCP_linear, - emax = results_nsim_non_monotonic$Bay_emax - results_nsim_non_monotonic$MCP_emax, - sigemax =results_nsim_non_monotonic$Bay_sigemax - results_nsim_non_monotonic$MCP_sigemax, - #quadratic = results_nsim_non_monotonic$Bay_quadratic - results_nsim_non_monotonic$MCP_quadratic, - n_sim = n_sim_values -) - - -results_nsim_diff_non_monotonic <- melt(results_nsim_diff_non_monotonic, id.vars = "n_sim") - -plot_nsim_non_monotonic <- plot_power_deviation(results_nsim_diff_non_monotonic, results_nsim_diff_non_monotonic$n_sim, "nsim", c(100, 500, 1000, 2500, 5000)) -plot_nsim_non_monotonic - -``` - -### variability scenario - - - -```{r} -#safe results in data.table for plot -results_nsim_var <- data.table( - MCP_linear = c(results_list_nsim_MCP_var[[1]]$sim_results$power, results_list_nsim_MCP_var[[2]]$sim_results$power, results_list_nsim_MCP_var[[3]]$sim_results$power, results_list_nsim_MCP_var[[4]]$sim_results$power, results_list_nsim_MCP_var[[5]]$sim_results$power), - MCP_exp = c(results_list_nsim_MCP_var[[6]]$sim_results$power, results_list_nsim_MCP_var[[7]]$sim_results$power, results_list_nsim_MCP_var[[8]]$sim_results$power, results_list_nsim_MCP_var[[9]]$sim_results$power, results_list_nsim_MCP_var[[10]]$sim_results$power), - MCP_emax = c(results_list_nsim_MCP_var[[11]]$sim_results$power, results_list_nsim_MCP_var[[12]]$sim_results$power, results_list_nsim_MCP_var[[13]]$sim_results$power, 
results_list_nsim_MCP_var[[14]]$sim_results$power, results_list_nsim_MCP_var[[15]]$sim_results$power), - Bay_linear = results_nsim_Bay_var$Bay_linear, - Bay_exp = results_nsim_Bay_var$Bay_exponential, - Bay_emax = results_nsim_Bay_var$Bay_emax -) - -results_nsim_diff_var <- data.table( - linear = results_nsim_var$Bay_linear - results_nsim_var$MCP_linear, - exponential = results_nsim_var$Bay_exp - results_nsim_var$MCP_exp, - emax = results_nsim_var$Bay_emax - results_nsim_var$MCP_emax, - n_sim = n_sim_values - ) - -results_nsim_diff_var <- melt(results_nsim_diff_var, id.vars = "n_sim") - -plot_nsim_var <- plot_power_deviation(results_nsim_diff_var, results_nsim_diff_var$n_sim, "nsim", c(100, 500, 1000, 2500, 5000)) -plot_nsim_var -``` - -```{r} -# save(results_nsim_Bay, results_list_nsim_MCP, results_nsim_monotonic, results_nsim_non_monotonic, results_nsim_var, results_min_MCP, results_monotonic_MCP, results_non_monotonic_MCP, results_var_MCP, results_min_MCP_nsample, results_monotonic_MCP_nsample, results_non_monotonic_MCP_nsample, results_var_MCP_nsample, results_min_Bay, results_min_Bay_nsample, results_monotonic_Bay, results_monotonic_Bay_nsample, results_non_monotonic_Bay, results_non_monotonic_Bay_nsample, results_variability_Bay, results_variability_Bay_nsample, -# file = "simulation_data.RData") -``` diff --git a/vignettes/outdatet/Simulation_Example.Rmd b/vignettes/outdatet/Simulation_Example.Rmd deleted file mode 100644 index 4b36538..0000000 --- a/vignettes/outdatet/Simulation_Example.Rmd +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: "Simulation Example of Bayesian MCPMod for Continuous Data" -output: rmarkdown::html_vignette -number_sections: true -vignette: > - %\VignetteIndexEntry{Simulation Example of Bayesian MCPMod for Continuous Data} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -```{r setup} -library(BayesianMCPMod) -library(clinDR) -library(dplyr) - -set.seed(7015) -``` - -# Background and data - -In this vignette, we will show the use of the Bayesian MCPMod package for trial planning for continuous distributed data. -As in [the analysis example vignette](analysis_normal.html), we focus on the indication MDD and make use of historical data that is included in the clinDR package. -More specifically, trial results for BRINTELLIX will be utilized to establish an informative prior for the control group. - -# Calculation of a MAP prior -In a first step, a meta analytic predictive prior will be calculated using historical data from 5 trials with main endpoint Change from baseline in MADRS score after 8 weeks. -Please note that only information from the control group will be integrated leading to an informative mixture prior for the control group, while for the active groups a non-informative prior will be specified. - -```{r Historical Data} -data("metaData") -testdata <- as.data.frame(metaData) -dataset <- filter(testdata, bname == "BRINTELLIX") -histcontrol <- filter(dataset, dose == 0, primtime == 8, indication == "MAJOR DEPRESSIVE DISORDER") - -hist_data <- data.frame( - trial = histcontrol$nctno, - est = histcontrol$rslt, - se = histcontrol$se, - sd = histcontrol$sd, - n = histcontrol$sampsize) - -sd_tot <- with(hist_data, sum(sd * n) / sum(n)) -``` -We will make use of the same getPriorList() function as in the [analysis example vignette](analysis_normal.html) to create a MAP prior. 
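-Since that helper is not defined in this file, a condensed sketch of what it does is given below. This is an illustrative, minimal version of the definition shown in the analysis vignette (gMAP fit of the historical control data, mixture approximation, optional robustification, and weakly-informative priors for the active dose groups); it is not meant as the authoritative implementation.
-
-```{r Sketch of getPriorList, eval = FALSE}
-# Illustrative sketch of a getPriorList()-style helper (condensed from the
-# analysis vignette); the tau prior scaling and the default robust weight are
-# assumptions of this example.
-getPriorList <- function(hist_data, dose_levels, robust_weight = 0.5) {
-
-  sd_tot <- with(hist_data, sum(sd * n) / sum(n))
-
-  gmap <- RBesT::gMAP(
-    formula    = cbind(est, se) ~ 1 | trial,
-    weights    = hist_data$n,
-    data       = hist_data,
-    family     = gaussian,
-    beta.prior = cbind(0, 100 * sd_tot),
-    tau.dist   = "HalfNormal",
-    tau.prior  = cbind(0, sd_tot / 4))
-
-  # MAP prior for control: mixture approximation plus a robust component
-  prior_ctr <- RBesT::robustify(
-    priormix = RBesT::automixfit(gmap),
-    weight   = robust_weight,
-    sigma    = sd_tot)
-
-  # weakly-informative priors for the active dose groups
-  prior_trt <- RBesT::mixnorm(
-    comp1 = c(w = 1, m = summary(prior_ctr)[1], n = 1),
-    sigma = sd_tot,
-    param = "mn")
-
-  prior_list <- c(list(prior_ctr),
-                  rep(list(prior_trt), length(dose_levels) - 1))
-  names(prior_list) <- c("Ctr", paste0("DG_", seq_along(dose_levels[-1])))
-
-  prior_list
-}
-```
-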
-```{r Setting Prior without execution, eval = FALSE}
-dose_levels <- c(0, 2.5, 5, 10, 20)
-
-prior_list <- getPriorList(
-  hist_data = hist_data,
-  dose_levels = dose_levels,
-  robust_weight = 0.3)
-```
-```{r Setting Prior, echo = FALSE}
-dose_levels <- c(0, 2.5, 5, 10, 20)
-
-prior_list <- list(
-  Ctr = RBesT::mixnorm(
-    comp1 = c(w = 0.446213, m = -12.774661, s = 1.393130),
-    comp2 = c(w = 0.253787, m = 3.148116, s = 3.148116),
-    robust = c(w = 0.3, m = 9.425139, s = 9.425139),
-    sigma = sd_tot),
-  DG_1 = RBesT::mixnorm(
-    comp1 = c(w = 1, m = -12.816875, n = 1),
-    sigma = sd_tot,
-    param = "mn"),
-  DG_2 = RBesT::mixnorm(
-    comp1 = c(w = 1, m = -12.816875, n = 1),
-    sigma = sd_tot,
-    param = "mn"),
-  DG_3 = RBesT::mixnorm(
-    comp1 = c(w = 1, m = -12.816875, n = 1),
-    sigma = sd_tot,
-    param = "mn"),
-  DG_4 = RBesT::mixnorm(
-    comp1 = c(w = 1, m = -12.816875, n = 1),
-    sigma = sd_tot,
-    param = "mn")
-)
-```
-
-# Specification of new trial design
-
-For the hypothetical new trial, we plan with 4 active dose levels (2.5, 5, 10, 20) and specify a broad set of potential dose-response relationships, including a linear, an exponential, an emax, and two sigEmax models.
-Furthermore, we assume a maximum effect of -3 on top of control (i.e. assuming that active treatment can reduce the MADRS score after 8 weeks by up to 15.8 points) and plan a trial with 80 patients for each active group and 60 patients for control.
-```{r}
-exp <- DoseFinding::guesst(
-  d = 5,
-  p = c(0.2),
-  model = "exponential",
-  Maxd = max(dose_levels))
-
-emax <- DoseFinding::guesst(
-  d = 2.5,
-  p = c(0.9),
-  model = "emax")
-
-sigemax <- DoseFinding::guesst(
-  d = c(2.5, 5),
-  p = c(0.1, 0.6),
-  model = "sigEmax")
-
-sigemax2 <- DoseFinding::guesst(
-  d = c(2, 4),
-  p = c(0.3, 0.8),
-  model = "sigEmax")
-
-mods <- DoseFinding::Mods(
-  linear = NULL,
-  emax = emax,
-  exponential = exp,
-  sigEmax = rbind(sigemax, sigemax2),
-  doses = dose_levels,
-  maxEff = -3,
-  placEff = -12.8)
-
-n_patients <- c(60, 80, 80, 80, 80)
-```
-
-# Calculation of success probabilities
-
-To calculate success probabilities for the different assumed dose-response models and the specified trial design, we will apply the assessDesign function.
-For illustration purposes, the number of simulated trial results is reduced to 100 in this example.
-```{r}
-success_probabilities <- assessDesign(
-  n_patients = n_patients,
-  mods = mods,
-  prior_list = prior_list,
-  sd = sd_tot,
-  n_sim = 100) # speed up example run-time
-
-success_probabilities
-```
-As an alternative, we will evaluate a design with the same overall sample size but allocating more patients to the highest dose group and to control.
-```{r}
-success_probabilities_uneq <- assessDesign(
-  n_patients = c(80, 60, 60, 60, 120),
-  mods = mods,
-  prior_list = prior_list,
-  sd = sd_tot,
-  n_sim = 100) # speed up example run-time
-success_probabilities_uneq
-```
-For this specific trial setting, the adapted allocation ratio leads to increased success probabilities under all assumed dose-response relationships.
-
-Instead of specifying the assumed effects via the models, it is also possible to specify the effects for the individual dose levels directly via the dr_means input.
-This also allows, for example, the simulation of scenarios with a prior-data conflict.
-```{r} -success_probabilities <- assessDesign( - n_patients = c(60, 80, 80, 80, 80), - mods = mods, - prior_list = prior_list, - sd = sd_tot, - dr_means = c(-12, -14, -15, -16, -17), - n_sim = 100) # speed up example run-time -success_probabilities -``` \ No newline at end of file diff --git a/vignettes/outdatet/analysis_normal.Rmd b/vignettes/outdatet/analysis_normal.Rmd deleted file mode 100644 index 9edb6a2..0000000 --- a/vignettes/outdatet/analysis_normal.Rmd +++ /dev/null @@ -1,286 +0,0 @@ ---- -title: "Analysis Example of Bayesian MCPMod for Continuous Data" -output: rmarkdown::html_vignette -number_sections: true -vignette: > - %\VignetteIndexEntry{Analysis Example of Bayesian MCPMod for Continuous Data} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>" -) -``` - -```{r setup} -library(BayesianMCPMod) -library(clinDR) -library(dplyr) - -set.seed(7015) -``` - -# Background and data - -In this vignette we will show the use of the Bayesian MCPMod package for continuous distributed data. -The focus lies on the utilization of an informative prior and the Bayesian MCPMod evaluation of a single trial. -We will use data that is included in the clinDR package. -More specifically, trial results of BRINTELLIX will be used to illustrate the specification of an informative prior and the usage of such a prior for the Bayesian evaluation of a (hypothetical) new trial. -BRINTELLIX is a medication used to treat major depressive disorder. -Various clinical trials with different dose groups, including control groups, were conducted. - -# Calculation of a MAP prior - -In a first step, a meta analytic prior will be calculated using historical data from 4 trials with main endpoint Change from baseline in MADRS score after 8 weeks. -Please note that only information from the control group will be integrated leading to an informative mixture prior for the control group, while for the active groups a non-informative prior will be specified. -```{r Historical Data for Control Arm} -data("metaData") -dataset <- filter(as.data.frame(metaData), bname == "BRINTELLIX") -histcontrol <- filter( - dataset, - dose == 0, - primtime == 8, - indication == "MAJOR DEPRESSIVE DISORDER", - protid != 5) - -hist_data <- data.frame( - trial = histcontrol$nctno, - est = histcontrol$rslt, - se = histcontrol$se, - sd = histcontrol$sd, - n = histcontrol$sampsize) -``` -Here, we suggest a function to construct a list of prior distributions for the different dose groups. -This function is adapted to the needs of this example. -Other applications may need a different way to construct prior distributions. 
-```{r Defining MAP prior function} -getPriorList <- function ( - - hist_data, - dose_levels, - dose_names = NULL, - robust_weight = 0.5 - -) { - - sd_tot <- with(hist_data, sum(sd * n) / sum(n)) - - gmap <- RBesT::gMAP( - formula = cbind(est, se) ~ 1 | trial, - weights = hist_data$n, - data = hist_data, - family = gaussian, - beta.prior = cbind(0, 100 * sd_tot), - tau.dist = "HalfNormal", - tau.prior = cbind(0, sd_tot / 4)) - - prior_ctr <- RBesT::automixfit(gmap) - - if (!is.null(robust_weight)) { - - prior_ctr <- suppressMessages(RBesT::robustify( - priormix = prior_ctr, - weight = robust_weight, - sigma = sd_tot)) - - } - - prior_trt <- RBesT::mixnorm( - comp1 = c(w = 1, m = summary(prior_ctr)[1], n = 1), - sigma = sd_tot, - param = "mn") - - prior_list <- c(list(prior_ctr), - rep(x = list(prior_trt), - times = length(dose_levels[-1]))) - - if (is.null(dose_names)) { - - dose_names <- c("Ctr", paste0("DG_", seq_along(dose_levels[-1]))) - - } - - names(prior_list) <- dose_names - - return (prior_list) - -} -``` -With the dose levels to be investigated, the prior distribution can be constructed. -```{r Getting the MAP prior} -dose_levels <- c(0, 2.5, 5, 10) - -prior_list <- getPriorList( - hist_data = hist_data, - dose_levels = dose_levels, - robust_weight = 0.3) - -getESS(prior_list) -``` - -# Specifications for the new trial - -To be able to apply the Bayesian MCPMod approach, candidate models need to be specified using functions from the R package DoseFinding. -Since there are only 3 active dose levels we will limit the set of candidate models to a linear, an exponential, and an emax model. -Note that the linear candidate model does not require a guesstimate. -```{r Pre-Specification of candidate models} -exp_guesst <- DoseFinding::guesst( - d = 5, - p = c(0.2), - model = "exponential", - Maxd = max(dose_levels)) - -emax_guesst <- DoseFinding::guesst( - d = 2.5, - p = c(0.9), - model = "emax") - -mods <- DoseFinding::Mods( - linear = NULL, - emax = emax_guesst, - exponential = exp_guesst, - doses = dose_levels, - maxEff = -1, - placEff = -12.8) -``` -We will use the trial with ct.gov number NCT00735709 as exemplary new trial. -```{r new trial} -new_trial <- filter( - dataset, - primtime == 8, - indication == "MAJOR DEPRESSIVE DISORDER", - protid == 5) - -n_patients <- c(128, 124, 129, 122) -``` -# Combination of prior information and trial results -As outlined in Fleischer et al. [Bayesian MCPMod. Pharmaceutical Statistics. 2022; 21(3): 654-670.], in a first step the posterior is calculated combining the prior information with the estimated results of the new trial. Via the summary function it is possible to print out the summary information of the posterior distributions. -```{r Trial results} -posterior <- getPosterior( - prior = prior_list, - mu_hat = new_trial$rslt, - S_hat = new_trial$se, - calc_ess = TRUE) - -summary(posterior) -``` -# Execution of Bayesian MCPMod Test step - -For the execution of the testing step of Bayesian MCPMod a critical value on the probability scale will be determined for a given alpha level. -This critical value is calculated using the re-estimated contrasts for the frequentist MCPMod to ensure that, when using non-informative priors, the actual error level for falsely declaring a significant trial in the Bayesian MCPMod is controlled by the specified alpha level. - -A pseudo-optimal contrast matrix is generated based on the variability of the posterior distribution (see Fleischer et al. 2022 for more details). 
-```{r Preparation of input for Bayesian MCPMod Test step} -crit_pval <- getCritProb( - mods = mods, - dose_levels = dose_levels, - se_new_trial = new_trial$se, - alpha_crit_val = 0.05) - -contr_mat <- getContr( - mods = mods, - dose_levels = dose_levels, - sd_posterior = summary(posterior)[, 2]) -``` -Please note that there are different ways to derive the contrasts. -The following code shows the implementation of some of these ways but it is not executed and the contrast specification above is used. -```{r , eval = FALSE} -# i) the frequentist contrast -contr_mat_prior <- getContr( - mods = mods, - dose_levels = dose_levels, - dose_weights = n_patients, - prior_list = prior_list) -# ii) re-estimated frequentist contrasts -contr_mat_prior <- getContr( - mods = mods, - dose_levels = dose_levels, - se_new_trial = new_trial$se) -# iii) Bayesian approach using number of patients for new trial and prior distribution -contr_mat_prior <- getContr( - mods = mods, - dose_levels = dose_levels, - dose_weights = n_patients, - prior_list = prior_list) -``` -The Bayesian MCP testing step is then executed based on the posterior information, the provided contrasts and the multiplicity adjusted critical value. -```{r Execution of Bayesian MCPMod Test step} -BMCP_result <- performBayesianMCP( - posterior_list = posterior, - contr = contr_mat, - crit_prob_adj = crit_pval) - -BMCP_result -``` -The testing step is significant indicating a non-flat dose-response shape. -In detail, all 3 models are significant and the p-value for the emax model indicates deviation from the null hypothesis the most. - -# Model fitting and visualization of results - -In the model fitting step the posterior distribution is used as basis. -Both simplified and full fitting are performed. -For the simplified fit, the multivariate normal distribution of the control group is approximated and reduced by a one-dimensional normal distribution. -The actual fit (on this approximated posterior distribution) is then performed using generalized least squares criterion. -In contrast, for the full fit, the non-linear optimization problem is addressed via the Nelder Mead algorithm. - -The output of the fit includes information about the predicted effects for the included dose levels, the generalized AIC, and the corresponding weights. -For the considered case, the simplified and the full fit are very similar. -```{r Model fitting} -# Option a) Simplified approach by using approximated posterior distribution -fit_simple <- getModelFits( - models = names(mods), - dose_levels = dose_levels, - posterior = posterior, - simple = TRUE) - -# Option b) Making use of the complete posterior distribution -fit <- getModelFits( - models = names(mods), - dose_levels = dose_levels, - posterior = posterior, - simple = FALSE) -``` -Via the predict() function, one can also receive estimates for dose levels that were not included in the trial. -```{r Predict} -predict(fit, doses = c(0, 2.5, 4, 5, 7, 10)) -``` -It is possible to plot the fitted dose response models and an AIC based average model (black lines). -```{r Plot simple vs fit} -plot(fit_simple) -plot(fit) -``` - -To assess the uncertainty, one can additionally visualize credible bands (orange shaded areas, default levels are 50% and 95%). -These credible bands are calculated with a bootstrap method as follows: -Samples from the posterior distribution are drawn and for every sample the simplified fitting step and a prediction is performed. 
-These predictions are then used to identify and visualize the specified quantiles. -```{r Plot with bootstrap} -plot(fit, cr_bands = TRUE) -``` - -The bootstrap based quantiles can also be directly calculated via the getBootstrapQuantiles() function. -For this example, only 6 quantiles are bootstrapped for each model fit. -```{r Bootstrap} -getBootstrapQuantiles( - model_fits = fit, - quantiles = c(0.025, 0.5, 0.975), - doses = c(0, 2.5, 4, 5, 7, 10), - n_samples = 6 -) -``` -Technical note: The median quantile of the bootstrap based procedure is not necessary similar to the main model fit, as they are derived via different procedures. -The main fit, i.e. the black lines in the plot, show the best fit of a certain model based on minimizing the residuals for the posterior distribution, while the bootstrap based 50% quantile shows the median fit of the random sampling and fitting procedure. - -# Additional note -It is also possible to perform the testing and modeling step in a combined fashion via the performBayesianMCPMod() function. -This code serves merely as an example and is not run in this vignette. -```{r, eval = FALSE} -performBayesianMCPMod( - posterior_list = posterior, - contr = contr_mat, - crit_prob_adj = crit_pval, - simple = FALSE) -``` diff --git a/vignettes/outdatet/analysis_normal_noninformative.qmd b/vignettes/outdatet/analysis_normal_noninformative.qmd deleted file mode 100644 index 06e8ecc..0000000 --- a/vignettes/outdatet/analysis_normal_noninformative.qmd +++ /dev/null @@ -1,414 +0,0 @@ ---- -title: "Analysis Example of Bayesian MCPMod for Continuous Data with non-informative prior" -format: - html: - fig-height: 3.5 - self-contained: true - toc: true - number-sections: true - bibliography: references.bib -vignette: > - %\VignetteIndexEntry{Analysis Example of Bayesian MCPMod for Continuous Data with non-informative prior} - %\VignetteEncoding{UTF-8} - %\VignetteEngine{quarto::html} ---- - -```{r setup} -devtools::load_all() -#library(BayesianMCPMod) -library(RBesT) -library(clinDR) -library(dplyr) -library(tibble) -library(reactable) - -set.seed(7015) -``` - -```{r} -#| code-summary: setup -#| code-fold: true -#| message: false -#| warning: false - -#' Display Parameters Table -#' -#' This function generates a markdown table displaying the names and values of parameters -#' from a named list. -#' -#' @param named_list A named list where each name represents a parameter name and the list -#' element represents the parameter value. Date values in the list are automatically -#' converted to character strings for display purposes. -#' -#' @return Prints a markdown table with two columns: "Parameter Name" and "Parameter Values". -#' The function does not return a value but displays the table directly to the output. 
-#' -#' @importFrom knitr kable -#' @examples -#' params <- list("Start Date" = as.Date("2020-01-01"), -#' "End Date" = as.Date("2020-12-31"), -#' "Threshold" = 10) -#' display_params_table(params) -#' -#' @export -display_params_table <- function(named_list) { - display_table <- data.frame() - value_names <- data.frame() - for (i in 1:length(named_list)) { - # dates will display as numeric by default, so convert to char first - if (class(named_list[[i]]) == "Date") { - named_list[[i]] = as.character(named_list[[i]]) - } - if (!is.null(names(named_list[[i]]))) { - value_names <- rbind(value_names, paste(names(named_list[[i]]), collapse = ', ')) - } - values <- data.frame(I(list(named_list[[i]]))) - display_table <- rbind(display_table, values) - } - - round_numeric <- function(x, digits = 3) { - if (is.numeric(x)) { - return(round(x, digits)) - } else { - return(x) - } - } - - display_table[1] <- lapply(display_table[1], function(sublist) { - lapply(sublist, round_numeric) - }) - - class(display_table[[1]]) <- "list" - - if (nrow(value_names) == 0) { - knitr::kable( - cbind(names(named_list), display_table), - col.names = c("Name", "Value") - ) - } else { - knitr::kable( - cbind(names(named_list), value_names, display_table), - col.names = c("Name", "Value Labels", "Value") - ) - } -} -``` - -# Introduction - -This vignette demonstrates the application of the {BayesianMCPMod} package for -analyzing a phase 2 dose-finding trial using the Bayesian MCPMod approach. - -# Prior Specification - -Ideally, priors are grounded in historical data. This approach allows for the synthesis of -prior knowledge with current data, enhancing the accuracy of trial evaluations. - -The focus of this vignette is more generic, however. We specify weakly-informative -priors across all dose groups to allow the trial data to have a stronger influence on the analysis. - -```{r} -dose_levels <- c(0, 2.5, 5, 10) - -prior_list <- lapply(dose_levels, function(dose_group) { - RBesT::mixnorm(weak = c(w = 1, m = 0, s = 200), sigma = 10) -}) - -names(prior_list) <- c("Ctr", paste0("DG_", dose_levels[-1])) -``` - -# Dose-Response Model Shapes - -Candidate models are specified using the {DoseFinding} package. Models can be -parameterized using guesstimates or by directly providing distribution parameters. -Note that the linear candidate model does not require parameterization. - -[**Note:** The LinLog model is rarely used and not currently supported by `{BayesianMCPMod}`.]{.aside} - -In the code below, the models are "guesstimated" using the `DoseFinding::guesst` function. -The `d` option usually takes a single value (a dose level), and the corresponding `p` -for the maximum effect achieved at `d`. - - -```{r} -# Guesstimate estimation -exp_guesst <- DoseFinding::guesst( - model = "exponential", - d = 5, p = 0.2, Maxd = max(dose_levels) -) -emax_guesst <- DoseFinding::guesst( - model = "emax", - d = 2.5, p = 0.9 -) -sigEmax_guesst <- DoseFinding::guesst( - model = "sigEmax", - d = c(2.5, 5), p = c(0.5, 0.95) -) -logistic_guesst <- DoseFinding::guesst( - model = "logistic", - d = c(5, 10), p = c(0.1, 0.85) -) -``` - -In some cases, you need to provide more information. For instance, `sigEmax` -requires a pair of `d` and `p` values, and `exponential` requires the specification of -the maximum dose for the trial (`Maxd`). 
- -[See the help files for model specifications by typing `?DoseFinding::guesst` in your console]{.aside} - - - -Of course, you can also specify the models directly on the parameter scale (without using `DoseFinding::guesst`). - -For example, you can get a betaMod model by specifying `delta1` and `delta2` -parameters (`scale` is assumed to be `1.2` of the maximum dose), or a quadratic model with the `delta2` parameter. - -```{r} -betaMod_params <- c(delta1 = 1, delta2 = 1) -quadratic_params <- c(delta2 = -0.1) -``` - -Now, we can go ahead and create a `Mods` object, which will be used in the remainder -of the vignette. - -```{r} -mods <- DoseFinding::Mods( - linear = NULL, - # guesstimate scale - exponential = exp_guesst, - emax = emax_guesst, - sigEmax = sigEmax_guesst, - logistic = logistic_guesst, - # parameter scale - betaMod = betaMod_params, - quadratic = quadratic_params, - # Options for all models - doses = dose_levels, - maxEff = -1, - placEff = -12.8 -) - -plot(mods) -``` - -The `mods` object we just created above contains the full model parameters, which can be helpful for -understanding how the guesstimates are translated onto the parameter scale. - -```{r} -display_params_table(mods) -``` - -And we can see the assumed treatment effects for the specified dose groups below: - -```{r} -knitr::kable(DoseFinding::getResp(mods, doses = dose_levels)) -``` - -## Trial Data - -We will use the trial with ct.gov number NCT00735709 as our phase 2 trial data, -available in the `{clinDR}` package [@nct00735709_2024a]. - -```{r} -data("metaData") - -trial_data <- dplyr::filter( - dplyr::filter(tibble::tibble(metaData), bname == "BRINTELLIX"), - primtime == 8, - indication == "MAJOR DEPRESSIVE DISORDER", - protid == 5 -) - -n_patients <- c(128, 124, 129, 122) -``` - -# Posterior Calculation - -In the first step of Bayesian MCPMod, the posterior is calculated by combining -the prior information with the estimated results of the trial [@fleischer_2022]. - -```{r} -posterior <- getPosterior( - prior_list = prior_list, - mu_hat = trial_data$rslt, - S_hat = trial_data$se, - calc_ess = TRUE -) - -knitr::kable(summary(posterior)) -``` - -# Bayesian MCPMod Test Step - -The testing step of Bayesian MCPMod is executed using a critical value on the -probability scale and a pseudo-optimal contrast matrix. - -The critical value is calculated using (re-estimated) contrasts for frequentist -MCPMod to ensure error control when using weakly-informative priors. - -A pseudo-optimal contrast matrix is generated based on the variability of the -posterior distribution (see [@fleischer_2022] for more details). - -```{r} -crit_pval <- getCritProb( - mods = mods, - dose_levels = dose_levels, - se_new_trial = trial_data$se, - alpha_crit_val = 0.05 -) - -contr_mat <- getContr( - mods = mods, - dose_levels = dose_levels, - sd_posterior = summary(posterior)[, 2] -) -``` - -Please note that there are different ways to derive the contrasts. -The following code shows the implementation of some of these ways but it is not -executed and the contrast specification above is used. 
- -```{r} -#| eval: false - -# i) the frequentist contrast -contr_mat_prior <- getContr( - mods = mods, - dose_levels = dose_levels, - dose_weights = n_patients, - prior_list = prior_list) -# ii) re-estimated frequentist contrasts -contr_mat_prior <- getContr( - mods = mods, - dose_levels = dose_levels, - se_new_trial = trial_data$se) -# iii) Bayesian approach using number of patients for new trial and prior distribution -contr_mat_prior <- getContr( - mods = mods, - dose_levels = dose_levels, - dose_weights = n_patients, - prior_list = prior_list) -``` - -The Bayesian MCP testing step is then executed: - -```{r} -BMCP_result <- performBayesianMCP( - posterior_list = posterior, - contr = contr_mat, - crit_prob_adj = crit_pval) -``` - -Summary information: - -```{r} -BMCP_result -``` - -The testing step is significant, indicating a non-flat dose-response shape. -All models are significant, with the `emax` model indicating the greatest deviation -from the null hypothesis. - -# Model Fitting and Visualization - -In the model fitting step the posterior distribution is used as basis. - -Both simplified and full fitting are performed. - -For the simplified fit, the multivariate normal distribution of the control group -is approximated and reduced by a one-dimensional normal distribution. - -The actual fit (on this approximated posterior distribution) is then performed -using generalized least squares criterion. In contrast, for the full fit, the -non-linear optimization problem is addressed via the Nelder-Mead algorithm -[@neldermead_2024a] implemented by the `{nloptr}` package. - -The output of the fit includes information about the predicted effects for the -included dose levels, the generalized AIC, and the corresponding weights. - -For the considered case, the simplified and the full fit are very similar, so we -present the full fit. - -```{r} -# If simple = TRUE, uses approx posterior -# Here we use complete posterior distribution -fit <- getModelFits( - models = mods, - dose_levels = dose_levels, - posterior = posterior, - simple = FALSE) -``` - -Estimates for dose levels not included in the trial: - -```{r} -display_params_table(stats::predict(fit, doses = c(0, 2.5, 4, 5, 7, 10))) -``` - -Plots of fitted dose-response models and an AIC-based average model: - -```{r} -plot(fit) -``` - -To assess the uncertainty, one can additionally visualize credible bands -(orange shaded areas, default levels are 50% and 95%). - -These credible bands are calculated with a bootstrap method as follows: - -- Samples from the posterior distribution are drawn and for every sample the -simplified fitting step and a prediction is performed. - -- These predictions are then used to identify and visualize the specified quantiles. - -```{r} -plot(fit, cr_bands = TRUE) -``` - -The bootstrap based quantiles can also be directly calculated via the -`getBootstrapQuantiles()` function. - -For this example, only 6 quantiles are bootstrapped for each model fit. 
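-
-Conceptually, this corresponds to a resampling loop along the lines of the sketch below. The sketch is purely illustrative and is not the implementation behind `getBootstrapQuantiles()`; in particular, the use of `RBesT::rmix()` and `DoseFinding::fitMod()`, the fixed number of resamples, and the restriction to the emax model are assumptions made for this illustration.
-
-```{r Bootstrap sketch, eval = FALSE}
-# Illustrative sketch only: draw posterior samples, refit one candidate model,
-# predict on a dose grid, and summarise the predictions by quantiles.
-post_sd  <- summary(posterior)[, 2]            # posterior SDs per dose group
-dose_seq <- c(0, 2.5, 4, 5, 7, 10)
-
-boot_preds <- replicate(200, {
-  mu_draw <- sapply(posterior, RBesT::rmix, n = 1)   # one draw per dose group
-  fit_b <- DoseFinding::fitMod(
-    dose  = dose_levels,
-    resp  = mu_draw,
-    S     = diag(post_sd^2),
-    type  = "general",
-    model = "emax",
-    bnds  = DoseFinding::defBnds(max(dose_levels))$emax)
-  predict(fit_b, predType = "ls-means", doseSeq = dose_seq)
-})
-
-# quantiles of the bootstrapped predictions, one row per dose in dose_seq
-t(apply(boot_preds, 1, quantile, probs = c(0.025, 0.5, 0.975)))
-```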
-
-```{r}
-bootstrap_quantiles <- getBootstrapQuantiles(
-  model_fits = fit,
-  quantiles = c(0.025, 0.5, 0.975),
-  doses = c(0, 2.5, 4, 5, 7, 10),
-  n_samples = 6
-)
-```
-
-```{r}
-#| code-fold: true
-reactable::reactable(
-  data = bootstrap_quantiles,
-  groupBy = "models",
-  columns = list(
-    doses = colDef(aggregate = "count", format = list(aggregated = colFormat(suffix = " doses"))),
-    "2.5%" = colDef(aggregate = "mean", format = list(aggregated = colFormat(prefix = "mean = ", digits = 2), cell = colFormat(digits = 4))),
-    "50%" = colDef(aggregate = "mean", format = list(aggregated = colFormat(prefix = "mean = ", digits = 2), cell = colFormat(digits = 4))),
-    "97.5%" = colDef(aggregate = "mean", format = list(aggregated = colFormat(prefix = "mean = ", digits = 2), cell = colFormat(digits = 4)))
-  )
-)
-```
-
-**Technical note:** The median quantile of the bootstrap-based procedure is not
-necessarily similar to the main model fit, as the two are derived via different procedures.
-
-The main fit (black line) minimizes residuals with respect to the posterior distribution,
-while the bootstrap median is the median of the repeated sampling and refitting procedure.
-
-# Additional note
-
-Testing and modeling can also be combined via `performBayesianMCPMod()`,
-but this is not run here.
-
-```{r}
-#| eval: false
-performBayesianMCPMod(
-  posterior_list = posterior,
-  contr = contr_mat,
-  crit_prob_adj = crit_pval,
-  simple = FALSE)
-```