From fbec1e7b2ca3279716e5529d4595c620a6e16cc9 Mon Sep 17 00:00:00 2001 From: Daniel Date: Wed, 6 Jun 2018 18:04:16 +0200 Subject: [PATCH] version 0.15.0 on CRAN, update website --- DESCRIPTION | 5 +- R/check_model_assumptions.R | 5 +- R/mean_n.R | 8 +- R/merMod_p.R | 9 +- R/mwu.R | 2 +- R/pseudo_r2.R | 2 +- R/rmse.R | 4 +- R/sjStatistics.R | 7 +- R/std_b.R | 6 +- R/weight.R | 2 +- R/xtab_statistics.R | 4 +- docs/articles/anova-statistics.html | 126 ++-- docs/articles/bayesian-statistics.html | 688 +++++++++--------- .../figure-html/unnamed-chunk-6-1.png | Bin 26919 -> 26515 bytes docs/articles/index.html | 7 +- docs/articles/mixedmodels-statistics.html | 237 +++--- docs/authors.html | 11 +- docs/docsearch.css | 31 +- docs/docsearch.js | 85 --- docs/favicon.ico | Bin 3101 -> 3101 bytes docs/index.html | 12 +- docs/news/index.html | 8 +- docs/pkgdown.css | 5 - docs/pkgdown.js | 232 +++--- docs/pkgdown.yml | 4 +- docs/reference/boot_ci.html | 7 +- docs/reference/bootstrap.html | 11 +- docs/reference/check_assumptions.html | 12 +- docs/reference/chisq_gof.html | 7 +- docs/reference/cod.html | 11 +- docs/reference/converge_ok.html | 7 +- docs/reference/cv.html | 11 +- docs/reference/cv_error.html | 7 +- docs/reference/deff.html | 2 +- docs/reference/efc.html | 2 +- docs/reference/eta_sq.html | 6 +- docs/reference/find_beta.html | 2 +- docs/reference/gmd.html | 2 +- docs/reference/grpmean.html | 18 +- docs/reference/hdi.html | 23 +- docs/reference/hoslem_gof.html | 2 +- docs/reference/icc.html | 16 +- docs/reference/index.html | 104 ++- docs/reference/inequ_trend.html | 2 +- docs/reference/is_prime.html | 2 +- docs/reference/mean_n.html | 2 +- docs/reference/mn.html | 194 ----- docs/reference/mwu.html | 8 +- docs/reference/nhanes_sample.html | 2 +- docs/reference/odds_to_rr.html | 2 +- docs/reference/overdisp.html | 2 +- docs/reference/p_value.html | 47 +- docs/reference/pca.html | 2 +- docs/reference/pred_accuracy.html | 12 +- docs/reference/pred_vars.html | 2 +- 
docs/reference/prop.html | 2 +- docs/reference/r2.html | 2 +- docs/reference/re_var.html | 14 +- docs/reference/reexports.html | 2 +- docs/reference/reliab_test.html | 6 +- docs/reference/rmse.html | 6 +- docs/reference/robust.html | 2 +- docs/reference/scale_weights.html | 2 +- docs/reference/se.html | 25 +- docs/reference/se_ybar.html | 2 +- docs/reference/sjstats-package.html | 2 +- docs/reference/smpsize_lmm.html | 2 +- docs/reference/std_beta.html | 8 +- docs/reference/svyglm.nb.html | 2 +- docs/reference/table_values.html | 17 +- docs/reference/tidy_stan.html | 2 +- docs/reference/typical_value.html | 2 +- docs/reference/var_pop.html | 2 +- docs/reference/weight.html | 4 +- docs/reference/wtd_sd.html | 2 +- docs/reference/xtab_statistics.html | 6 +- inst/doc/anova-statistics.html | 4 +- inst/doc/bayesian-statistics.html | 180 ++--- inst/doc/mixedmodels-statistics.R | 11 +- inst/doc/mixedmodels-statistics.Rmd | 15 +- inst/doc/mixedmodels-statistics.html | 35 +- man/check_assumptions.Rd | 5 +- man/cod.Rd | 2 +- man/mean_n.Rd | 8 +- man/mwu.Rd | 2 +- man/p_value.Rd | 9 +- man/rmse.Rd | 4 +- man/std_beta.Rd | 6 +- man/table_values.Rd | 7 +- man/weight.Rd | 2 +- man/xtab_statistics.Rd | 4 +- vignettes/mixedmodels-statistics.Rmd | 15 +- 92 files changed, 1119 insertions(+), 1329 deletions(-) delete mode 100644 docs/docsearch.js delete mode 100644 docs/reference/mn.html diff --git a/DESCRIPTION b/DESCRIPTION index a6212ad2..000839fb 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -2,9 +2,8 @@ Package: sjstats Type: Package Encoding: UTF-8 Title: Collection of Convenient Functions for Common Statistical Computations -Version: 0.14.3.9000 -Date: 2018-05-03 -Author: Daniel Lüdecke +Version: 0.15.0 +Date: 2018-06-06 Authors@R: person("Daniel", "Lüdecke", role = c("aut", "cre"), email = "d.luedecke@uke.de", comment = c(ORCID = "0000-0002-8895-3206")) Maintainer: Daniel Lüdecke Description: Collection of convenient functions for common statistical computations, diff --git 
a/R/check_model_assumptions.R b/R/check_model_assumptions.R index d4094a1b..b585045d 100644 --- a/R/check_model_assumptions.R +++ b/R/check_model_assumptions.R @@ -56,7 +56,8 @@ #' < 0.05 indicates a significant deviation from normal distribution. #' Note that this formal test almost always yields significant results #' for the distribution of residuals and visual inspection (e.g. qqplots) -#' are preferable (see \code{\link[sjPlot]{sjp.lm}} with \code{type = "ma"}). +#' are preferable (see \code{\link[sjPlot]{plot_model}} with +#' \code{type = "diag"}). #' \cr \cr #' \code{multicollin()} wraps \code{\link[car]{vif}} and returns #' the logical result as tibble. \code{TRUE}, if multicollinearity @@ -74,7 +75,7 @@ #' @note These formal tests are very strict and in most cases violation of model #' assumptions are alerted, though the model is actually ok. It is #' preferable to check model assumptions based on visual inspection -#' (see \code{\link[sjPlot]{sjp.lm}} with \code{type = "ma"}). +#' (see \code{\link[sjPlot]{plot_model}} with \code{type = "diag"}). #' #' @examples #' data(efc) diff --git a/R/mean_n.R b/R/mean_n.R index 35ffc7ce..62f7cff1 100644 --- a/R/mean_n.R +++ b/R/mean_n.R @@ -1,8 +1,8 @@ #' @title Row means with min amount of valid values #' @name mean_n #' @description This function is similar to the SPSS \code{MEAN.n} function and computes -#' row means from a \code{\link{data.frame}} or \code{\link{matrix}} if at least \code{n} -#' values of a row are valid (and not \code{\link{NA}}). +#' row means from a \code{data.frame} or \code{matrix} if at least \code{n} +#' values of a row are valid (and not \code{NA}). #' #' @param dat A data frame with at least two columns, where row means are applied. 
#' @param n May either be @@ -10,12 +10,12 @@ #' \item a numeric value that indicates the amount of valid values per row to calculate the row mean; #' \item or a value between 0 and 1, indicating a proportion of valid values per row to calculate the row mean (see 'Details'). #' } -#' If a row's sum of valid values is less than \code{n}, \code{\link{NA}} will be returned as row mean value. +#' If a row's sum of valid values is less than \code{n}, \code{NA} will be returned as row mean value. #' @param digits Numeric value indicating the number of decimal places to be used for rounding mean #' value. Negative values are allowed (see ‘Details’). #' #' @return A vector with row mean values of \code{df} for those rows with at least \code{n} -#' valid values. Else, \code{\link{NA}} is returned. +#' valid values. Else, \code{NA} is returned. #' #' @details Rounding to a negative number of \code{digits} means rounding to a power of #' ten, so for example mean_n(df, 3, digits = -2) rounds to the diff --git a/R/merMod_p.R b/R/merMod_p.R index 60bf70e2..c1f84b5e 100644 --- a/R/merMod_p.R +++ b/R/merMod_p.R @@ -10,7 +10,7 @@ #' conditional F-tests with Kenward-Roger approximation for the df (see #' 'Details'). #' -#' @return A \code{\link{tibble}} with the model coefficients' names (\code{term}), +#' @return A \code{data.frame} with the model coefficients' names (\code{term}), #' p-values (\code{p.value}) and standard errors (\code{std.error}). #' #' @details For linear mixed models (\code{lmerMod}-objects), the computation of @@ -23,6 +23,10 @@ #' \cr \cr #' If p-values already have been computed (e.g. for \code{merModLmerTest}-objects #' from the \CRANpkg{lmerTest}-package), these will be returned. +#' \cr \cr +#' The \code{print()}-method has a \code{summary}-argument, that - in +#' case \code{p.kr = TRUE} - also prints information on the approximated +#' degrees of freedom (see 'Examples'). 
#' #' @examples #' data(efc) @@ -38,7 +42,8 @@ #' #' # lme4-fit #' library(lme4) -#' fit <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) +#' sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE) +#' fit <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy) #' pv <- p_value(fit, p.kr = TRUE) #' #' # normal output diff --git a/R/mwu.R b/R/mwu.R index 76056587..e272bbac 100644 --- a/R/mwu.R +++ b/R/mwu.R @@ -1,7 +1,7 @@ #' @title Mann-Whitney-U-Test #' @name mwu #' @description This function performs a Mann-Whitney-U-Test (or Wilcoxon rank sum test, -#' see \code{\link{wilcox.test}} and \code{\link[coin]{wilcox_test}}) +#' see \code{\link[stats]{wilcox.test}} and \code{\link[coin]{wilcox_test}}) #' for \code{x}, for each group indicated by \code{grp}. If \code{grp} #' has more than two categories, a comparison between each combination of #' two groups is performed. \cr \cr diff --git a/R/pseudo_r2.R b/R/pseudo_r2.R index ff59a981..f0bf0cf1 100644 --- a/R/pseudo_r2.R +++ b/R/pseudo_r2.R @@ -6,7 +6,7 @@ #' an alternative to other Pseudo-R-squared values #' like Nakelkerke's R2 or Cox-Snell R2. #' -#' @param x Fitted \code{\link{glm}} or \code{\link[lme4]{glmer}} model. +#' @param x Fitted \code{\link[stats]{glm}} or \code{\link[lme4]{glmer}} model. #' #' @return The \code{D} Coefficient of Discrimination, also known as #' Tjur's R-squared value. diff --git a/R/rmse.R b/R/rmse.R index 946f9ae3..be9aece7 100644 --- a/R/rmse.R +++ b/R/rmse.R @@ -3,8 +3,8 @@ #' @description Compute root mean squared error, residual standard error or #' mean square error of fitted linear (mixed effects) models. #' -#' @param fit Fitted linear model of class \code{\link{lm}}, -#' \code{\link[lme4]{merMod}} (\pkg{lme4}) or \code{\link[nlme]{lme}} (\pkg{nlme}). +#' @param fit Fitted linear model of class \code{lm}, \code{merMod} (\pkg{lme4}) +#' or \code{lme} (\pkg{nlme}). #' @param normalized Logical, use \code{TRUE} if normalized rmse should be returned. 
#' #' @seealso \code{\link{r2}} for R-squared or pseude-R-squared values, and diff --git a/R/sjStatistics.R b/R/sjStatistics.R index e503b071..af83bd62 100644 --- a/R/sjStatistics.R +++ b/R/sjStatistics.R @@ -3,9 +3,10 @@ #' @description This function calculates a table's cell, row and column percentages as #' well as expected values and returns all results as lists of tables. #' -#' @param tab Simple \code{\link{table}} or \code{\link{ftable}} of which cell, row and column percentages -#' as well as expected values are calculated. Tables of class \code{\link{xtabs}} and other will -#' be coerced to \code{\link{ftable}} objects. +#' @param tab Simple \code{\link{table}} or \code{\link[stats]{ftable}} of which +#' cell, row and column percentages as well as expected values are calculated. +#' Tables of class \code{\link[stats]{xtabs}} and other will be coerced to +#' \code{ftable} objects. #' @param digits Amount of digits for the table percentage values. #' #' @return (Invisibly) returns a list with four tables: diff --git a/R/std_b.R b/R/std_b.R index 2e4ce5b5..1ecb9be5 100644 --- a/R/std_b.R +++ b/R/std_b.R @@ -3,8 +3,8 @@ #' @description Returns the standardized beta coefficients, std. error and confidence intervals #' of a fitted linear (mixed) models. #' -#' @param fit Fitted linear (mixed) model of class \code{lm} or -#' \code{\link[lme4]{merMod}} (\CRANpkg{lme4} package). +#' @param fit Fitted linear (mixed) model of class \code{lm} or \code{merMod} +#' (\CRANpkg{lme4} package). #' @param type If \code{fit} is of class \code{lm}, normal standardized coefficients #' are computed by default. Use \code{type = "std2"} to follow #' \href{http://www.stat.columbia.edu/~gelman/research/published/standardizing7.pdf}{Gelman's (2008)} @@ -31,7 +31,7 @@ #' \cr \cr and \cr \cr #' \code{head(model.matrix(nlme::gls(neg_c_7 ~ as.factor(e42dep), data = efc, na.action = na.omit)))}. 
#' \cr \cr -#' In such cases, use \code{\link{to_dummy}} to create dummies from +#' In such cases, use \code{\link[sjmisc]{to_dummy}} to create dummies from #' factors. #' #' @references \href{http://en.wikipedia.org/wiki/Standardized_coefficient}{Wikipedia: Standardized coefficient} diff --git a/R/weight.R b/R/weight.R index b07dfdc3..22e4ed1e 100644 --- a/R/weight.R +++ b/R/weight.R @@ -15,7 +15,7 @@ #' #' @details \code{weight2()} sums up all \code{weights} values of the associated #' categories of \code{x}, whereas \code{weight()} uses a -#' \code{\link{xtabs}} formula to weight cases. Thus, \code{weight()} +#' \code{\link[stats]{xtabs}} formula to weight cases. Thus, \code{weight()} #' may return a vector of different length than \code{x}. #' #' @note The values of the returned vector are in sorted order, whereas the values' diff --git a/R/xtab_statistics.R b/R/xtab_statistics.R index a23d8776..ea0feb1a 100644 --- a/R/xtab_statistics.R +++ b/R/xtab_statistics.R @@ -12,8 +12,8 @@ #' to be a data frame. If \code{x1} and \code{x2} are not specified, #' the first two columns of the data frames are used as variables #' to compute the crosstab. -#' @param tab A \code{\link{table}} or \code{\link{ftable}}. Tables of class -#' \code{\link{xtabs}} and other will be coerced to \code{\link{ftable}} +#' @param tab A \code{\link{table}} or \code{\link[stats]{ftable}}. Tables of class +#' \code{\link[stats]{xtabs}} and other will be coerced to \code{ftable} #' objects. #' @param x1 Name of first variable that should be used to compute the #' contingency table. If \code{data} is a table object, this argument diff --git a/docs/articles/anova-statistics.html b/docs/articles/anova-statistics.html index b775eb92..a9e1390f 100644 --- a/docs/articles/anova-statistics.html +++ b/docs/articles/anova-statistics.html @@ -30,7 +30,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -89,7 +89,7 @@

Statistics for Anova Tables

Daniel Lüdecke

-

2018-05-24

+

2018-06-06

Source: vignettes/anova-statistics.Rmd @@ -109,15 +109,15 @@

  • anova_stats()
  • Before we start, we fit a simple model:

    -
    library(sjstats)
    -# load sample data
    -data(efc)
    -
    -# fit linear model
    -fit <- aov(
    -  c12hour ~ as.factor(e42dep) + as.factor(c172code) + c160age,
    -  data = efc
    -)
    +

    All functions accept objects of class aov or anova, so you can also use model fits from the car package, which allows fitting Anova’s with different types of sum of squares. Other objects, like lm, will be coerced to anova internally.

    The following functions return the effect size statistic as named numeric vector, using the model’s term names.

    @@ -126,87 +126,87 @@

    The eta-squared is the proportion of the total variability in the dependent variable that is accounted for by the variation in the independent variable. It is the ratio of the sum of squares for each group level to the total sum of squares. It can be interpreted as percentage of variance accounted for by a variable.

    For variables with 1 degree of freedom (in the numerator), the square root of eta-squared is equal to the correlation coefficient r. For variables with more than 1 degree of freedom, eta-squared equals R2. This makes eta-squared easily interpretable. Furthermore, these effect sizes can easily be converted into effect size measures that can be, for instance, further processed in meta-analyses.

    Eta-squared can be computed simply with:

    -
    eta_sq(fit)
    -#> # A tibble: 3 x 2
    -#>   term                  etasq
    -#>   <chr>                 <dbl>
    -#> 1 as.factor(e42dep)   0.266  
    -#> 2 as.factor(c172code) 0.00540
    -#> 3 c160age             0.0484
    +

    Partial Eta-Squared

    The partial eta-squared value is the ratio of the sum of squares for each group level to the sum of squares for each group level plus the residual sum of squares. It is more difficult to interpret, because its value strongly depends on the variability of the residuals. Partial eta-squared values should be reported with caution, and Levine and Hullett (2002) recommend reporting eta- or omega-squared rather than partial eta-squared.

    Use the partial-argument to compute partial eta-squared values:

    -
    eta_sq(fit, partial = TRUE)
    -#> # A tibble: 3 x 2
    -#>   term                partial.etasq
    -#>   <chr>                       <dbl>
    -#> 1 as.factor(e42dep)         0.281  
    -#> 2 as.factor(c172code)       0.00788
    -#> 3 c160age                   0.0665
    +

    Omega-Squared

    While eta-squared estimates tend to be biased in certain situations, e.g. when the sample size is small or the independent variables have many group levels, omega-squared estimates are corrected for this bias.

    Omega-squared can be simply computed with:

    -
    omega_sq(fit)
    -#> # A tibble: 3 x 2
    -#>   term                omegasq
    -#>   <chr>                 <dbl>
    -#> 1 as.factor(e42dep)   0.263  
    -#> 2 as.factor(c172code) 0.00377
    -#> 3 c160age             0.0476
    +

    Partial Omega-Squared

    omega_sq() also has a partial-argument to compute partial omega-squared values. Computing the partial omega-squared statistics is based on bootstrapping. In this case, use n to define the number of samples (1000 by default.)

    -
    omega_sq(fit, partial = TRUE, n = 100)
    -#> # A tibble: 3 x 2
    -#>   term                partial.omegasq
    -#>   <chr>                         <dbl>
    -#> 1 as.factor(e42dep)           0.278  
    -#> 2 as.factor(c172code)         0.00547
    -#> 3 c160age                     0.0649
    +

    Cohen’s F

    Finally, cohens_f() computes Cohen’s F effect size for all independent variables in the model:

    -
    cohens_f(fit)
    -#> # A tibble: 3 x 2
    -#>   term                cohens.f
    -#>   <chr>                  <dbl>
    -#> 1 as.factor(e42dep)     0.626 
    -#> 2 as.factor(c172code)   0.0891
    -#> 3 c160age               0.267
    +

    Complete Statistical Table Output

    The anova_stats() function takes a model input and computes a comprehensive summary, including the above effect size measures, returned as tidy data frame (as tibble, to be exact):

    -
    anova_stats(fit)
    -#> # A tibble: 4 x 12
    -#>   term                   df    sumsq  meansq statistic p.value  etasq partial.etasq omegasq partial.omegasq cohens.f power
    -#>   <chr>               <dbl>    <dbl>   <dbl>     <dbl>   <dbl>  <dbl>         <dbl>   <dbl>           <dbl>    <dbl> <dbl>
    -#> 1 as.factor(e42dep)       3  577756. 192585.    109.     0      0.266         0.281   0.263           0.278    0.626  1   
    -#> 2 as.factor(c172code)     2   11722.   5861.      3.31   0.037  0.005         0.008   0.004           0.005    0.089  0.63
    -#> 3 c160age                 1  105170. 105170.     59.4    0      0.048         0.066   0.048           0.065    0.267  1   
    -#> 4 Residuals             834 1476436.   1770.     NA     NA     NA            NA      NA              NA       NA     NA
    +

    Like the other functions, the input may also be an object of class anova, so you can also use model fits from the car package, which allows fitting Anova’s with different types of sum of squares:

    -
    anova_stats(car::Anova(fit, type = 3))
    -#> # A tibble: 5 x 12
    -#>   term                   sumsq  meansq    df statistic p.value  etasq partial.etasq omegasq partial.omegasq cohens.f  power
    -#>   <chr>                  <dbl>   <dbl> <dbl>     <dbl>   <dbl>  <dbl>         <dbl>   <dbl>           <dbl>    <dbl>  <dbl>
    -#> 1 (Intercept)           26851.  26851.     1     15.2    0      0.013         0.018   0.012           0.017    0.135  0.973
    -#> 2 as.factor(e42dep)    426462. 142154.     3     80.3    0      0.209         0.224   0.206           0.22     0.537  1    
    -#> 3 as.factor(c172code)    7352.   3676.     2      2.08   0.126  0.004         0.005   0.002           0.003    0.071  0.429
    -#> 4 c160age              105170. 105170.     1     59.4    0      0.051         0.066   0.051           0.065    0.267  1    
    -#> 5 Residuals           1476436.   1770.   834     NA     NA     NA            NA      NA              NA       NA     NA
    +

    diff --git a/docs/articles/bayesian-statistics.html b/docs/articles/bayesian-statistics.html index c4acdb72..189373b6 100644 --- a/docs/articles/bayesian-statistics.html +++ b/docs/articles/bayesian-statistics.html @@ -8,8 +8,8 @@ Statistics for Bayesian Models • sjstats - - + + @@ -30,7 +30,7 @@ sjstats - 0.14.3.9000 + 0.15.0

    @@ -89,7 +89,7 @@

    Statistics for Bayesian Models

    Daniel Lüdecke

    -

    2018-06-05

    +

    2018-06-06

    Source: vignettes/bayesian-statistics.Rmd @@ -117,105 +117,105 @@

    2018-06-05

  • r2()
  • Before we start, we fit some models, including a mediation-object from the mediation-package, which we use for comparison with brms. The functions work with brmsfit, stanreg and stanfit-objects.

    -
    library(sjstats)
    -library(sjmisc)
    -library(mediation)
    -library(brms)
    -
    -# load sample data
    -data(jobs)
    -data(efc)
    -efc <- to_factor(efc, e42dep, c172code, c161sex, e15relat)
    -zinb <- read.csv("http://stats.idre.ucla.edu/stat/data/fish.csv")
    -
    -# linear models, for mediation analysis
    -b1 <- lm(job_seek ~ treat + econ_hard + sex + age, data = jobs)
    -b2 <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data = jobs)
    -
    -# mediation analysis, for comparison with brms
    -m1 <- mediate(b1, b2, sims = 1000, treat = "treat", mediator = "job_seek")
    -
    -# fit Bayesian models
    -f1 <- bf(job_seek ~ treat + econ_hard + sex + age)
    -f2 <- bf(depress2 ~ treat + job_seek + econ_hard + sex + age)
    -
    -m2 <- brm(f1 + f2 + set_rescor(FALSE), data = jobs, cores = 4)
    -
    -m3 <- brm(
    -  bf(count ~ child + camper + (1 | persons), 
    -     zi ~ child + camper + (1 | persons)),
    -  data = zinb,
    -  family = zero_inflated_poisson(),
    -  cores = 4
    -)
    -
    -m4 <- brm(
    -  mpg ~ wt + hp + (1 | cyl) + (1 + wt | gear), 
    -  data = mtcars, 
    -  cores = 4
    -)
    -
    -m5 <- brm(
    -  neg_c_7 ~ e42dep + c12hour + c172code + (1 | e15relat),
    -  data = efc,
    -  cores = 4
    -)
    +

    Highest Density Interval

    hdi() computes the highest density interval for posterior samples. Unlike equal-tailed intervals that exclude 2.5% from each tail of the distribution, the HDI is not equal-tailed and therefore always includes the mode(s) of posterior distributions.

    By default, hdi() prints the 90% intervals, however, the prob-argument can be used to calculate different or even multiple intervals.

    -
    hdi(m2)
    -#> 
    -#> # Highest Density Interval
    -#> 
    -#>                            HDI(90%)
    -#>  b_jobseek_Intercept  [ 3.45  3.87]
    -#>  b_depress2_Intercept [ 1.95  2.44]
    -#>  b_jobseek_treat      [-0.02  0.15]
    -#>  b_jobseek_econ_hard  [ 0.01  0.09]
    -#>  b_jobseek_sex        [-0.09  0.07]
    -#>  b_jobseek_age        [ 0.00  0.01]
    -#>  b_depress2_treat     [-0.11  0.03]
    -#>  b_depress2_job_seek  [-0.29 -0.19]
    -#>  b_depress2_econ_hard [ 0.12  0.18]
    -#>  b_depress2_sex       [ 0.04  0.18]
    -#>  b_depress2_age       [-0.00  0.00]
    -#>  sigma_jobseek        [ 0.70  0.75]
    -#>  sigma_depress2       [ 0.59  0.64]
    -
    -hdi(m2, prob = c(.5, .89))
    -#> 
    -#> # Highest Density Interval
    -#> 
    -#>                            HDI(50%)      HDI(89%)
    -#>  b_jobseek_Intercept  [ 3.60  3.78] [ 3.47  3.88]
    -#>  b_depress2_Intercept [ 2.10  2.30] [ 1.96  2.43]
    -#>  b_jobseek_treat      [ 0.03  0.10] [-0.02  0.15]
    -#>  b_jobseek_econ_hard  [ 0.03  0.07] [ 0.01  0.09]
    -#>  b_jobseek_sex        [-0.05  0.02] [-0.09  0.07]
    -#>  b_jobseek_age        [ 0.00  0.01] [ 0.00  0.01]
    -#>  b_depress2_treat     [-0.06 -0.01] [-0.10  0.03]
    -#>  b_depress2_job_seek  [-0.26 -0.22] [-0.29 -0.19]
    -#>  b_depress2_econ_hard [ 0.14  0.16] [ 0.12  0.18]
    -#>  b_depress2_sex       [ 0.08  0.14] [ 0.04  0.18]
    -#>  b_depress2_age       [-0.00  0.00] [-0.00  0.00]
    -#>  sigma_jobseek        [ 0.71  0.74] [ 0.70  0.75]
    -#>  sigma_depress2       [ 0.61  0.62] [ 0.59  0.64]
    +

    For multilevel models, the type-argument defines whether the HDI of fixed, random or all effects are shown.

    -
    hdi(m5, type = "random")
    -#> 
    -#> # Highest Density Interval
    -#> 
    -#>                              HDI(90%)
    -#>  r_e15relat.1.Intercept. [-0.15 1.30]
    -#>  r_e15relat.2.Intercept. [-0.15 1.02]
    -#>  r_e15relat.3.Intercept. [-0.91 0.70]
    -#>  r_e15relat.4.Intercept. [-0.61 0.75]
    -#>  r_e15relat.5.Intercept. [-0.89 0.77]
    -#>  r_e15relat.6.Intercept. [-1.66 0.23]
    -#>  r_e15relat.7.Intercept. [-1.22 0.86]
    -#>  r_e15relat.8.Intercept. [-0.89 0.43]
    +

    The computation for the HDI is based on the code from Kruschke 2015, pp. 727f. For default sampling in Stan (4000 samples), the 90% intervals for HDI are more stable than, for instance, 95% intervals. An effective sample size of at least 10.000 is recommended if 95% intervals should be computed (see Kruschke 2015, p. 183ff).

    @@ -223,19 +223,19 @@

    Region of Practical Equivalence (ROPE)

    Unlike a frequentist approach, Bayesian inference is not based on statistical significance, where effects need to be different from “zero”. Rather, the magnitude of a model’s parameter value and its uncertainty should not be ignored, and hence, an effect is not present when it simply differs from zero, but if it’s outside a specific range that can be considered as “practically no effect”. This range is called the region of practical equivalence (ROPE).

    rope() requires the rope-argument, which defines this region, and then gives a summary about the parameters and their proportion that lies inside and outside this ROPE.

    -
    rope(m5, rope = c(-1, 1))
    -#> 
    -#> # Proportions of samples inside and outside the ROPE
    -#> 
    -#>              inside outside
    -#>  b_Intercept   0.0%  100.0%
    -#>  b_e42dep2    42.9%   57.1%
    -#>  b_e42dep3     0.5%   99.5%
    -#>  b_e42dep4     0.0%  100.0%
    -#>  b_c12hour   100.0%    0.0%
    -#>  b_c172code2  99.5%    0.5%
    -#>  b_c172code3  78.3%   21.7%
    -#>  sigma         0.0%  100.0%
    +

    rope() does not suggest limits for the region of practical equivalence and does not tell you how big is practically equivalent to the null value. However, there are suggestions how to choose reasonable limits (see Kruschke 2018), which are implemented in the equi_test() functions.

    @@ -244,28 +244,28 @@

    equi_test() combines the two functions hdi() and rope() and performs a “HDI+ROPE decision rule” (Test for Practical Equivalence) (Kruschke 2018) to check whether parameter values should be accepted or rejected against the background of a formulated null hypothesis.

    equi_test() computes the 95%-HDI and checks if a model predictor’s HDI lies completely outside, completely inside or partially inside the ROPE. If the HDI is completely outside the ROPE, the “null hypothesis” for this parameter is “rejected”. If the ROPE completely covers the HDI, i.e. all most credible values of a parameter are inside the region of practical equivalence, the null hypothesis is accepted. Else, it’s undecided whether to accept or reject the null hypothesis. In short, desirable results are low proportions inside the ROPE (the closer to zero the better) and the H0 should be rejected.

    If neither the rope nor eff_size argument are specified, the effect size will be set to 0.1 (half of a small effect according to Cohen) and the ROPE is then 0 +/- .1 * sd(y) for linear models. This is the suggested way to specify the ROPE limits according to Kruschke (2018).

    -
    equi_test(m5)
    -#> 
    -#> # Test for Practical Equivalence of Model Predictors
    -#> 
    -#>   Effect Size: 0.10
    -#>          ROPE: [-0.39 0.39]
    -#>       Samples: 4000
    -#> 
    -#>                         H0 %inROPE     HDI(95%)
    -#>  b_Intercept (*)    reject    0.00 [ 7.51 9.89]
    -#>  b_e42dep2 (*)   undecided    8.30 [ 0.09 2.12]
    -#>  b_e42dep3 (*)      reject    0.02 [ 1.32 3.32]
    -#>  b_e42dep4 (*)      reject    0.00 [ 2.79 4.91]
    -#>  b_c12hour          accept  100.00 [ 0.00 0.01]
    -#>  b_c172code2     undecided   70.83 [-0.44 0.76]
    -#>  b_c172code3     undecided   22.57 [-0.04 1.49]
    -#>  sigma              reject    0.00 [ 3.40 3.76]
    -#> 
    -#> (*) the number of effective samples may be insufficient for some parameters
    +

    For models with binary outcome, there is no concrete way to derive the effect size that defines the ROPE limits. Two examples from Kruschke suggest that a negligible change is about .05 on the logit-scale. In these cases, it is recommended to specify the rope argument, however, if not specified, the ROPE limits are calculated in this way: 0 +/- .1 * sd(intercept) / 4. For all other models, 0 +/- .1 * sd(intercept) is used to determine the ROPE limits. These formulas are based on experience that worked well in real-life situations, but are most likely not generally the best approach.

    Besides a numerical output, the results can also be printed as HTML-table or plotted, using the out-argument. For plots, the 95% distributions of the posterior samples are shown, the ROPE is a light-blue shaded region in the plot, and the distributions are colored depending on whether the parameter values are accepted, rejected or undecided.

    -
    equi_test(m5, out = "plot")
    +
    equi_test(m5, out = "plot")

    @@ -278,23 +278,23 @@

  • separates different model parts, e.g. random from fixed effects, or conditional from zero-inflated models
  • and prints everything nicely
  • -
    tidy_stan(m3)
    -#> 
    -#> # Summary Statistics of Stan-Model
    -#> 
    -#> ## Conditional Model:
    -#> 
    -#>            estimate std.error      HDI(89%) neff_ratio Rhat mcse
    -#>  Intercept     1.23      0.75 [-0.27  2.80]       0.17    1 0.04
    -#>  child        -1.15      0.10 [-1.29 -1.00]       1.00    1 0.00
    -#>  camper        0.73      0.10 [ 0.59  0.89]       1.00    1 0.00
    -#> 
    -#> ## Zero-Inflated Model:
    -#> 
    -#>            estimate std.error      HDI(89%) neff_ratio Rhat mcse
    -#>  Intercept    -0.68      0.74 [-1.88  0.55]       0.27    1 0.02
    -#>  child         1.89      0.33 [ 1.36  2.43]       0.85    1 0.01
    -#>  camper       -0.85      0.34 [-1.42 -0.30]       1.00    1 0.01
    +

    Additional statistics in the output are:

    • standard errors (which are actually median absolute deviations)
    • @@ -303,231 +303,231 @@

    • Monte Carlo Standard Error (mcse);

    By default, the “estimate” is the median of the posterior distribution, but this can be changed with the typical-argument.

    -
    tidy_stan(m3, typical = "mean")
    -#> 
    -#> # Summary Statistics of Stan-Model
    -#> 
    -#> ## Conditional Model:
    -#> 
    -#>            estimate std.error      HDI(89%) neff_ratio Rhat mcse
    -#>  Intercept     1.18      0.75 [-0.27  2.80]       0.17    1 0.04
    -#>  child        -1.15      0.10 [-1.29 -1.00]       1.00    1 0.00
    -#>  camper        0.73      0.10 [ 0.59  0.89]       1.00    1 0.00
    -#> 
    -#> ## Zero-Inflated Model:
    -#> 
    -#>            estimate std.error      HDI(89%) neff_ratio Rhat mcse
    -#>  Intercept    -0.70      0.74 [-1.88  0.55]       0.27    1 0.02
    -#>  child         1.89      0.33 [ 1.36  2.43]       0.85    1 0.01
    -#>  camper       -0.85      0.34 [-1.42 -0.30]       1.00    1 0.01
    +

    To also show random effects of multilevel models, use the type-argument.

    -
    # printing fixed and random effects of multilevel model
    -tidy_stan(m3, type = "all")
    -#> 
    -#> # Summary Statistics of Stan-Model
    -#> 
    -#> ## Conditional Model: Fixed effects
    -#> 
    -#>            estimate std.error      HDI(89%) neff_ratio Rhat mcse
    -#>  Intercept     1.23      0.75 [-0.27  2.80]       0.17    1 0.04
    -#>  child        -1.15      0.10 [-1.29 -1.00]       1.00    1 0.00
    -#>  camper        0.73      0.10 [ 0.59  0.89]       1.00    1 0.00
    -#> 
    -#> ## Conditional Model: Random effect (Intercept: persons)
    -#> 
    -#>            estimate std.error      HDI(89%) neff_ratio Rhat mcse
    -#>  persons.1    -1.27      0.77 [-2.83  0.26]       0.17    1 0.04
    -#>  persons.2    -0.31      0.75 [-1.90  1.18]       0.17    1 0.04
    -#>  persons.3     0.40      0.75 [-1.07  1.97]       0.17    1 0.04
    -#>  persons.4     1.28      0.73 [-0.27  2.77]       0.17    1 0.04
    -#> 
    -#> ## Zero-Inflated Model: Fixed effects
    -#> 
    -#>            estimate std.error      HDI(89%) neff_ratio Rhat mcse
    -#>  Intercept    -0.68      0.74 [-1.88  0.55]       0.27    1 0.02
    -#>  child         1.89      0.33 [ 1.36  2.43]       0.85    1 0.01
    -#>  camper       -0.85      0.34 [-1.42 -0.30]       1.00    1 0.01
    -#> 
    -#> ## Zero-Inflated Model: Random effect (Intercept: persons)
    -#> 
    -#>            estimate std.error      HDI(89%) neff_ratio Rhat mcse
    -#>  persons.1     1.27      0.79 [ 0.07  2.68]       0.32    1 0.02
    -#>  persons.2     0.32      0.70 [-0.86  1.57]       0.28    1 0.02
    -#>  persons.3    -0.15      0.73 [-1.38  1.14]       0.28    1 0.02
    -#>  persons.4    -1.30      0.77 [-2.66 -0.11]       0.30    1 0.02
    +

    By default, 89%-HDI are computed (a convention following McElreath 2015), but other or even multiple HDI can be computed using the prob argument.

    -
    # two different HDI for multivariate response model
    -tidy_stan(m2, prob = c(.5, .95))
    -#> 
    -#> # Summary Statistics of Stan-Model
    -#> 
    -#> ## Response: jobseek
    -#> 
    -#>            estimate std.error      HDI(50%)      HDI(95%) neff_ratio Rhat mcse
    -#>  Intercept     3.67      0.13 [ 3.60  3.78] [ 3.43  3.93]          1    1    0
    -#>  treat         0.07      0.05 [ 0.03  0.10] [-0.04  0.16]          1    1    0
    -#>  econ_hard     0.05      0.03 [ 0.03  0.07] [ 0.00  0.10]          1    1    0
    -#>  sex          -0.01      0.05 [-0.05  0.02] [-0.10  0.09]          1    1    0
    -#>  age           0.00      0.00 [ 0.00  0.01] [-0.00  0.01]          1    1    0
    -#> 
    -#> ## Response: depress2
    -#> 
    -#>            estimate std.error      HDI(50%)      HDI(95%) neff_ratio Rhat mcse
    -#>  Intercept     2.21      0.15 [ 2.10  2.30] [ 1.90  2.49]          1    1    0
    -#>  treat        -0.04      0.04 [-0.06 -0.01] [-0.12  0.04]          1    1    0
    -#>  joseek       -0.24      0.03 [-0.26 -0.22] [-0.30 -0.18]          1    1    0
    -#>  econ_hard     0.15      0.02 [ 0.14  0.16] [ 0.11  0.19]          1    1    0
    -#>  sex           0.11      0.04 [ 0.08  0.14] [ 0.02  0.18]          1    1    0
    -#>  age           0.00      0.00 [-0.00  0.00] [-0.00  0.00]          1    1    0
    +

    Summary of Mediation Analysis

    mediation() is another summary function, especially for mediation analysis, i.e. for multivariate response models with casual mediation effects.

    Let us recall the models:

    -
    f1 <- bf(job_seek ~ treat + econ_hard + sex + age)
    -f2 <- bf(depress2 ~ treat + job_seek + econ_hard + sex + age)
    -
    -m2 <- brm(f1 + f2 + set_rescor(FALSE), data = jobs, cores = 4)
    +

    Here, treat is the treatment effect, job_seek is the mediator effect, f1 describes the mediator model and f2 describes the outcome model.

    mediation() returns a data frame with information on the direct effect (median value of posterior samples from treatment of the outcome model), mediator effect (median value of posterior samples from mediator of the outcome model), indirect effect (median value of the multiplication of the posterior samples from mediator of the outcome model and the posterior samples from treatment of the mediation model) and the total effect (median value of sums of posterior samples used for the direct and indirect effect). The proportion mediated is the indirect effect divided by the total effect.

    The simplest call just needs the model-object.

    -
    mediation(m2)
    -#> 
    -#> # Causal Mediation Analysis for Stan Model
    -#> 
    -#>   Treatment: treat
    -#>    Mediator: job_seek
    -#>    Response: depress2
    -#> 
    -#>                  Estimate    HDI (90%)
    -#>   Direct effect:    -0.04 [-0.11 0.03]
    -#> Indirect effect:    -0.02 [-0.04 0.01]
    -#>    Total effect:    -0.06 [-0.13 0.01]
    -#> 
    -#> Proportion mediated: 27.91% [-74.77% 130.58%]
    +

    Typically, mediation() finds the treatment and mediator variables automatically. If this does not work, use the treatment and mediator arguments to specify the related variable names. For all values, the 90% HDIs are calculated by default. Use prob to calculate a different interval.

    Here is a comparison with the mediation package. Note that the summary()-output of the mediation package shows the indirect effect first, followed by the direct effect.

    -
    summary(m1)
    -#> 
    -#> Causal Mediation Analysis 
    -#> 
    -#> Quasi-Bayesian Confidence Intervals
    -#> 
    -#>                Estimate 95% CI Lower 95% CI Upper p-value
    -#> ACME            -0.0158      -0.0394         0.01    0.19
    -#> ADE             -0.0402      -0.1186         0.05    0.33
    -#> Total Effect    -0.0560      -0.1371         0.03    0.19
    -#> Prop. Mediated   0.2401      -1.5970         2.25    0.30
    -#> 
    -#> Sample Size Used: 899 
    -#> 
    -#> 
    -#> Simulations: 1000
    -
    -mediation(m2, prob = .95)
    -#> 
    -#> # Causal Mediation Analysis for Stan Model
    -#> 
    -#>   Treatment: treat
    -#>    Mediator: job_seek
    -#>    Response: depress2
    -#> 
    -#>                  Estimate    HDI (95%)
    -#>   Direct effect:    -0.04 [-0.12 0.04]
    -#> Indirect effect:    -0.02 [-0.04 0.01]
    -#>    Total effect:    -0.06 [-0.14 0.03]
    -#> 
    -#> Proportion mediated: 27.91% [-176.9% 232.71%]
    +

    If you want to calculate mean instead of median values from the posterior samples, use the typical-argument. Furthermore, there is a print()-method, which allows to print more digits.

    -
    mediation(m2, typical = "mean", prob = .95) %>% print(digits = 4)
    -#> 
    -#> # Causal Mediation Analysis for Stan Model
    -#> 
    -#>   Treatment: treat
    -#>    Mediator: job_seek
    -#>    Response: depress2
    -#> 
    -#>                  Estimate        HDI (95%)
    -#>   Direct effect:  -0.0398 [-0.1239 0.0420]
    -#> Indirect effect:  -0.0158 [-0.0422 0.0079]
    -#>    Total effect:  -0.0556 [-0.1437 0.0308]
    -#> 
    -#> Proportion mediated: 28.4146% [-176.3884% 233.2176%]
    +

    As you can see, the results are similar to what the mediation package produces for non-Bayesian models.

    ICC for multilevel models

    Similar to frequentist multilevel models, icc() computes the intraclass correlation coefficient for Bayesian multilevel models.

    -
    icc(m5)
    -#> 
    -#> Bayesian mixed model
    -#>  Family: gaussian (identity)
    -#> Formula: list() neg_c_7 ~ e42dep + c12hour + c172code + (1 | e15relat) list()
    -#> 
    -#>   ICC (e15relat): 0.032210
    +

    One advantage of the brms package is that you can compute the ICC for each sample of the posterior distribution, which allows you to easily calculate uncertainty intervals.

    -
    icc(m4, posterior = TRUE)
    -#> 
    -#> # Random Effect Variances and ICC
    -#> 
    -#> Family: gaussian (identity)
    -#> Formula: mpg ~ wt + hp + (1 | cyl) + (1 + wt | gear)
    -#> 
    -#> ## cyl
    -#>           ICC: 0.14 (HDI 89%: 0.00-0.68)
    -#> Between-group: 3.85 (HDI 89%: 0.00-34.50)
    -#> 
    -#> ## gear
    -#>           ICC:  0.53 (HDI 89%: 0.00-0.92)
    -#> Between-group: 15.34 (HDI 89%: 0.00-103.63)
    -#> 
    -#> ## Residuals
    -#> Within-group: 6.01 (HDI 89%: 3.64-9.03)
    -#> 
    -#> ## Random-slope-variance
    -#> gear: 1.52 (HDI 89%: 0.00-10.38)
    -
    -icc(m5, posterior = TRUE)
    -#> 
    -#> # Random Effect Variances and ICC
    -#> 
    -#> Family: gaussian (identity)
    -#> Formula: neg_c_7 ~ e42dep + c12hour + c172code + (1 | e15relat)
    -#> 
    -#> ## e15relat
    -#>           ICC: 0.03 (HDI 89%: 0.00-0.09)
    -#> Between-group: 0.35 (HDI 89%: 0.00-1.30)
    -#> 
    -#> ## Residuals
    -#> Within-group: 12.81 (HDI 89%: 11.77-13.86)
    +

    There’s also a print()-method which allows you to specify the HDI-interval and digits:

    -
    icc(m5, posterior = TRUE) %>% print(prob = .95, digits = 3)
    -#> 
    -#> # Random Effect Variances and ICC
    -#> 
    -#> Family: gaussian (identity)
    -#> Formula: neg_c_7 ~ e42dep + c12hour + c172code + (1 | e15relat)
    -#> 
    -#> ## e15relat
    -#>           ICC: 0.026 (HDI 95%: 0.000-0.133)
    -#> Between-group: 0.345 (HDI 95%: 0.000-1.999)
    -#> 
    -#> ## Residuals
    -#> Within-group: 12.810 (HDI 95%: 11.553-14.127)
    +

    Bayes r-squared and LOO-adjusted r-squared

    r2() computes either the Bayes r-squared value, or - if loo = TRUE - a LOO-adjusted r-squared value (which comes conceptionally closer to an adjusted r-squared measure).

    For the Bayes r-squared, the standard error is also reported. Note that r2() uses the median as measure of central tendency and the median absolute deviation as measure for variability.

    -
    r2(m5)
    -#>       Bayes R2: 0.168
    -#> Standard Error: 0.021
    -
    -r2(m5, loo = TRUE)
    -#> LOO-adjusted R2: 0.145
    +

    diff --git a/docs/articles/bayesian-statistics_files/figure-html/unnamed-chunk-6-1.png b/docs/articles/bayesian-statistics_files/figure-html/unnamed-chunk-6-1.png index fd7f75bb9956a740ea9b3981fc76e9a88752fa7e..7a907378270b855b0702f126827c2b211eed1db7 100644 GIT binary patch literal 26515 zcmeHw2|SeR|F$zmLK2FKI3-b8PDcn~Dk8E~C~GT|Wv1-LGG}rm$(HSuZ6Yaa*|(Xg z926NNS;vg*V;N%~gE8+j>YNkxe}Ct<|KHDhrlV@qST{+ccKyd1v4^=FoMC1V8DB}QN$YuATM>` z0&aJ3h>CKEDshM+I+Oqx3%HeiBl{HrivX@{Si~DxHgFAi;?sPaGmsrQ5kPh@-~zsn zh{(>4$bJ)%JrMB*xL8r@98nJHQS4lSA^`6tQ6<1dj3NRTE3KkF8)lRZgJnkmSq@|m zu%5r^C~@E@aZoRb0xpM=65uK+Arec7tP&Qhw7Q|qHa0yy%_?zVy@_BEqgcce_U!}NtbsQy;3jq{fQJAX0RLuf z8T7OSx`OA1{sngqj$I+_zu!d;PF>^R_@3jG&arboNmLB;iukZ+29K2iwDUoSu(E5k z7gkPIVt@VZg1$RmUXBydu-e<7I{62AEG4YRZ0}rh;CXxQGziIPtIM4zg(btz ziB25z?oF8q+IY@e({n7XhZ`PnjN9>Unr!`J@$m`{U9;HolOXf0x(~MVoHN^c?ebnz z;G@uJ$TnTGeSiNK4*Nh%$w$Y^kelBM1bW=KvE}$qYwvT0cg8#0G>Nax>05TycsbmM z$nS28fgDP@6Auwgx`VeXRf+*S@w^~wvp#HRMDm@MNo&2qA(JLeDAPxu@R+}e z8qq1*x%s0`5qPI4kH;OnmckFocPy&EV?Vin4{CFBXC2oQ;bx~JKM6MrL2dRDcB?Oy zz4X{)#m{|Xj}<7K&j=l-eGqkfyN3(OO#bcPn>-AxuH*POsv*a&#tA9H#-NB?jV$TE zQJ_DQqY!0Fa)8Z6b-~=*oJR!ZQMJ0DU1^1Xqjf4EoZo8p-8{1&KjKcIx3D{5qp^-- zKJmdRP_aH*hn@4DBZA2onBzX44|`BTy+AA9{ix$HUy=M^uW1LcaE)g zxsa^j-#I=8xTl8FDYOVr?8bjOllY4$yAJe~u?VMAe`CZQ7lcwx zc(zvm>&x(uR7BbeEviiEs7SZEzh<}u?S$INEvB>-v0N_makjtADD7~B9kp(#O^<}SQ(Z+=6=-Wf5WEsT8MJ)TZ3#5SJzA?`0l@9*91U|F#k6o6wg*@&wp(> zd@W2l{HvKoT94@sHow0Hsf($^WK?I*l?u^sOfvpgI3j6IH0M$rF@( z@f)z}Uj#0H-MDqZJr`1{`i+YI87TS{P#4-`I`B1YDu9%#{9{i@*oZoggl(ae{DX#r z(D$a}Uzeu>QhNALmB+>g?YvxWh9>U#Q&xq79Kmzts0rHoTSiEM6bh!ywch@rpFrz{ zX-M@j#^>diMw?4T!o>2mp_WMZN~$MM7bB1R;)OffO@b7auNq)@A0a)MXLcp+ZMbC0 zDYUituaWDlk{Kb&Nb_qjqM&RF-`VNtEo#sBn6=RZqunG-5jnelR3R)t0kdtu?TAv_ z{!Z#*&%xb@;2m;JdSW;4Rm9o!ZnPzJzVls|? 
zfS>H=^F|v zbh<@}97vT=SXrLxtm8|Br}(0@DsAm`4_M~6`7rI9jA@Epq6R3D`DOn^YF5&HcHKC(oQRM zI3?aurP8DQuEhx7?BmMtXBW=F7XN>yZr^f#eWB;@Tz z$TmK1Eo!Db=Rmu{KAZJ)U9PUA)T0tdCVV`7-|);nJ(6(&QQ*A|%Y1S_3+%LGoz+g) zMTc3>QB#~JI0HP6l!ZU@t~Z8-YWkV%F?Vk|V9(IrOHf#6oy}r5&f@Dr8hb2vKK2p8ZlS!roC|4i z>}@j6&bRlyu?OY#RpCycD-8&EZ%vMd%SAG%<7<(EP`W11*0rW({C4VEv-x@fBrGN| ztXG#o0aN|kIiJg&^b?X@XC$v}e1I3(+$;apXJDt$<`gjRTIgVSZ+@@oI%D=9EGb~% z{|Ytvau%?SXX!sv+`o$U#oF;#tNra+@oyMwcu~qL{r6w<8iQ_Ji02AIP>$bLu+{zn zHe-w$K7HK+!6po|AR)Jx)$k6%-yr7nLA!<qX*he@NS>evL2tT^+n@&6Ql>uKh5VcXKU+!Vx zmvrYfYg9z*@HL*ISm=6I*abizQ`1#8s64ryE$ir`bgqgFEk#>&Lnx)jdMzV%Ef!Mh zISd2u%plCJ;h?3!>ZejQ1flO9`7eG+HYd1))y+~gwo+=gtt(qPL=oMzi+47S*GT(I zGP=M5EqZ@Af(!AY@X^Q`QW_0OP|dX0@zdBw(cQW>H?EJMfeVgxAX@{ErFDGyNdF#( z6x(?Sy5-9>0jEHkR^2;BiX2Pft*j)ntp}r?yOkb^Ek57v!+f><_1nI-i=7W!*=Xq4 zl;>kDx(@nm%FO9Mib~c%NytY*Xr=b#h2F!cJZnuhYUsc{c~Gl%sEa6!g5`hFMifn9 z04=8F<1#~v=Zr;E$!Z!)<1h^|O_>v@JloZzhgtG-zJp*ZE06BFJtZ~h9`WC3aYkaR}Eq+60SywFz7iIEPNAAqbs?QAYRX=MB-;vR@3Q#c3c>aZNZ}yY~I^ zs77N#U098nBz@Mqs*F*ll4`Uo(kXe~LTzcBR9{Z&Y>X7hE`P!^Tj(ty8V1)oB9>$)$+CvYe!mG);B1r7h`hAAb%R#o?{-!2dn7e%j%e9gI^HZba zS544$0sR1`Ox+@H`3xKQ2~);98c^k&GRUeDsmUv6eY*;6F>i;7Ni{>?+ujm;Dr=DZ z2qG3;tT%A&bab}+;|vI2|2DXS<;W(YO(k6BmrU=m{7TV?uv3nYuJ9b*_lp1A)(pHU zI=n94Q3}AA-WL105bx>KQ%X&8mKOqTafL0e)VD2_Pso`ehQGeOMyfsM(879u#s7am zoQrPxuN5(EHBE>7K%QOzoHmBWXUpO88oN6gj|HJDt>xBHAxxhtkrN zvp-tGbG5vFdTcRlf~wrx>8eP$;vpLNEs!Hfje)kqW2EM-Muuq{|S2_ zaKzWF#VW(RMQYq#sb^D?-8UDNjZtQyQ}x`{<|;{?)SF?g(gd@$?u5nn;4Yg6HHq$x zPKFkWIbbVe%gDP8rv?v*<-IOFSk(R~VuK}1n$Wl|)${}>#=NJpd`OF+7xGc%N#9jR z+I*&4+$vo4zNC5wd!a|ziH;)s-*E6w*-T5h4{3U~*1wamy!X8rXGrH2-H*Qxk8Xb4 zcb_0%YCt>nxWDhzUcX1=G;3SXF0-|sEk)X%Gc!r*zi`D1l+eKy6@0T-n^iw9I`-Pn zG`+@gTO>f5^4C3OFG17jTWz}PxA{}R6$1$qzRw1(hQ@zBqmeD-Ka=^tVgLUN27lpo z`^$J(%n~r5hHom0xhF>!u{0v+|d_ zY&||Yv<6jQE~*2N)L^Kx6FY4xktX{aL|}WG0mJtQ$QGF^Tc5u3OU1y0Pzckv!JcWI zqlMAA{-Bx-IMplftRmI^f??1uJ2lmIGcbZjD0$s?rJx$J&Fy^zUM5^86n}}0Et73^ zR$bsXI}K4HeapVvplVMbW2{>ZGdY2JYYOXifRLh29M?f-=)7e{9RJd5KCX|3k;}P3 
z^701b-dn%4LIw)}gimD59P>Iz=om9_-_EXxqo^R89e~<+?KefD0-oZTTcD41hJRqg zM!dovwyjSn8$XGbYAn|Sh2N)$*Rr(?ow{H)Qs0@-AVN{;>sCdz?avYz^EO>w6+e5* z^+CyKJX08&yfVw^V{X37<`O>p?fH(OgFbK2a11Sab6A4!gWL0D{dL8N z;zC^Voxnhd_uAH#Lo^L?Vn9t8<)vo1Cc}iig!G!^Xk$OGB@1TL2uU{~-`_NVMX?U6 z32&FYw>wD=#viYV$hKkIPSM)M>8- z{H*0DOHeUQ`?4~A`v@)Ry7ovD0q5%}aAQC` zG}fVVNTSc0aX?Fej7d>b2D3sbiH*p_)>M*2&<@_&r?)>y%&qXRChgRvb~>4WsAmj3 zHnU9z%J?3@yjq2)${r#GaYL3p*ReSfMLi2;nYjL%#=7Dojs6a=FUJ)&k#m!X$>Pj~ z=MS?ULbUzpH(o3___le3cEVA&+*5?051Q#}wjcHOf0AEJ0M?P0yNH{QZ=JD;%H^Y< zV5y0kHEI=>%IN(8OQAEflTU@kf!6kX)>!8Vvu67Avt-@e4@v9`a*Q{<(pEl^fT`jyIXtj#rsCsUU`|8R0#Wy{mlW&`xt zTEWLGo~z>0hOWvK59Es^mxy0bo69{nB=mmbl||O_gQ*8i{SPkHx})_-T<9U5j0v@O z(ZdkFiD@x5eH!{3eOeB3{or}DOz=Z|j@@~lqNF^DZQMPdf$3pAs@`DbUc#aO8JG4= z;|0HM0GhQj@fx-zpb#tb0RXQIbS1qI@d&$rcI9Ja^f#M8%MwkE(T687 zIzBh_AsGfY$Zx zUP5%>aw$?ws(xw@<_1YhitTUuth59&1YD&hN^iHL;)Rke=q1r?Z{24NZD=zY&!tY? za`X&3L$|J7Q+d_F^jUxj&{2;dGQKg=gg)p>RfVDm+hz4x2njSKRR=99=$LQ=)z;~A zZU4TtDJ!p{PChN<*F}4F|9WJedp{}<^}De1P*+H|^nu{n*eZ)ylZ6GvLseaFSS^-hdz zIf=KZF}5GEHeK*W4{FlPUA_+maVMqOF0s$37c|aj65UEEuNzYXcX1^#bR~y7B5N6v zv9casg2#>sVw?eLbK+hbsiitGGEq??!1+bl)M8K{;APGY&)(b)6)Kvs6?j?DAS%1% z=fPp~KApn{`~Yk@R{3GOIon&dsxsY#N+H`8fiO6zTYpG)J6=%seEO-+kjn&}SkozO zr89gL&Sm8gq_6g%`ls8!r;|{$Lj8_LYb>?SV@oceq2c?EY4pr-$W@A?^5zD=KP|X> zRvfIpk(lAWodB35fFO5&*!P6YkX%T1D73J?kXVx?#>M+vVSmIz>O#+gNSjH|F9^4& zZGwJkj-=vnh$N<@Y>HY}ouK7cdS*zqawa~Gi1FJZ)5rUpKkJs@ePLo^Q3buMS)P7*b-CCb5*lHr^%W;LiTa?6?|qr z?J(1$4p;rjEiS#jRKmX%T#>;}OEGfciTw$0NB2M}z_gw^Mz82&)yaYVQ=KE)oxTk* zqmvj#w#(o7c;iT8OZv%Jj5$Y{Z8|`E-ph4EtVr+G&H{<#;}#!HR> zMER077jm00<&^u`Gpf&WNFDaqHfoi3l^zOjSoIz;-)#DZ#!U-yhK+@uM*3H3iw zj}~mDyuIZ9q;RGmfRS!--*vP_LG)VJC{if_+bD&^VE$AIm3$G9W=ZEjOLZK?FMHF= zx@txyxB|OY;!ewlgmKk{oe6iGi_x8TQsboN_PwG%>E0oedNpx#E9MKA z<0KZKH=^x9B%8G~qqicVCb!N}QjrjVR+UVegAE);&BJ^Vi1RyUX9K+@DNfrsRqX}f z_rGQ}D@CD80IwNjY1~zJ_pW71S9(%IKf~IQ)Elt0_e{9~ccA<8+hFyyb=9{+dp`{6 zd}T-tqSA6jia?qdW<3|C0KB+BxA>Ufb`f_Qj0jw-!6z3(uJ4$0Ou4lx-B<^)|Mc!4 
z47N&oX0u7L9=?lbtHa-v`7cQkZ1;XkDDlbf^_O4%OIqVE__;>u|Gx*V`~}*-2vzxy zDQ85lO6C}yh?EtSajsTn2Z0<`03_Q1t#vhsv#@s)e5rrLvV2zVU{W;kQOFnjT*B>M z-RAxj_ry+OUR%@lq&pTjV5FCSy86T_;l~beN|`IWbM}d_>@VRPLOsa*j<0@uh-UT8 zSMP@)Xe3G#u{#?7qL=@3NX(}xs`r}GW^<6ByeGLMrt@(3qtA$GwG<1%JkBzS$k1JS z+hj#XN;ErZxAXNr#{sR<>653NFI}(b98%!wa!5N#f8N_q_#JTqQZoMrI{fB6tRKp1 z!?x7+m;k2*l|MiyAd%9V4=1K}bc>-%cH}oRp$p*8Oys9-k(_zr+IsyiQ4y8rmU7Cw zXYvcg3M39Bi|zZR@N7300D_3{C%-YKKp+=FumAGF)6@ zjtRcJvx+%4;`N6Yk3Bd6kn`zWh@vtjD{m74xE2iSGn-q}S34$fH^Eq>R3#7aVLJit z==rce2#=yo4G+mDik@>-oPMr9Jb&y00K<3i%+5}vZKE)CW5l#=FkTUmNTRYGE`Vm1*Nx&KF!ewQolW3koQm`P& z0mN*geD6UR@6JzvcRN*DgmPOg6Ua9IF? zy&7y!g)5cRk1!IVmY*7I3dPmkyx?7He956*+cN_%0&)fWR!zoKuOiBsceU}Wh%)oh zDx$p0ZFw9sMRvQ!!=EMJE}0n5?|b<> zXIUa~m^tCld-1|MlsEpwf_tyd;lhbdk-UnK|2xRs#u0efpSV~)1J=k!A(V_ykG%^YVL0d z6H^pS{UiOnE0403ztfN2t*8;EH)1#x?V}*lgy`S=I3}Xd{-XX@;b_v}OyBd$nI5y7 zd&xLn`y_=*CAM#5hml%ZbHtvP5yHNy{1E$0?aOkx9sp4nt@ zs&Y@ugpto02~o49-x3E=UH3#=zSwv( z!4%cuVT zrceDpLGz^4g8nqhxH@&6Xq0l zwZuM6;rJZy4>;12?_iJkF|WF7gS)OR$9mwFNGL=!~{^j2QMmb3XfeDzh`}t#KTLQwlh;Ku9)CxY^L_sru5MJp*TeYQ^Z6 zo*Tjc<2eAGt^Z)qptBUXE7xw{nw^nskAis( zt=xec&XQX-@##uJl$PqYpIT2BTo~-6UT&S?>dGY>u91~tkf2?vjosRU;W{)|QlfH& z01!je)mK}J>B{n-hYeQ2y&TmK!dJ}Qg=RWk6MZG*$vG?r;4Y7T2njPkFxgUS@p168 z@^BAo&tjYNJ*~q%_JYYlWO_xrtcL&?aF`!=KZ451yf^m%uR#)nmuEwAA(0QgF(i(9 z^N0hNL_YQc89V``i|h>IX+3>OL*y{%E`)?9Egd4ND*2&MAGz0l*CX82!uV|J`03$O zp35ZFkA=iJ^T$I!Q%20B>d6NJp4&5LTx%`VG+9FTCCx*3PtniwG(Blash1wUA2@KO z$L9m@w9k#e>5id@y2hi0FRt06sbXE%p`Jel0&Z4L0YJ_d$GF;#H@$1~KGJ_&jH)v< z{i{PlOQl!BnQY9Et0pStaOt7q7D8Jx(;NYm+4a_w9+I56T$tKq+wP+PJ?rPW5y!uJ zngxO$GU<)L+VFTZXZ(0kBT|mvhaDj_{;S{njYT#YU;#@&JGf4vxO4zuV)?`X^T1Ai zlaEVjv$m!UPf@7>k3^mVD#dawXGph1*WBvYeO6nzcww(!aT03YV*jfkKyb;1{a>7n zz~&>9vc?_mH+YmbNUa7BG0?Qmd2y)kejJA(I(vP555Oq8+XMi9@S;iuWI=^i&#oZJ zZ3*K`mc3!-w=eE`%_;<|i+uLEOpE|l00L0df=JwmnO7HRo6VFi(&95SL*kB^(7frW zJc~n&?|C6JbEAb6=&R z9Tq4IO6cMy_E^h9`F#o~JhS8d?aPIewA`2W0d@HbsEDk!&hbhSS^4ypcW2j(`L!;i 
zzrBH6zXb_%UkxV8l|sE<-(p`KWr9G0E5g3%<6pb{{&5=1UlB%YP7e8t$ppxgKh910 zi^*JjYRn(F=L#AfL5#U#b1Ixwp0u)Mb`Y z(fzJ?m4RA{hFnQ+Y9DmS^K2@)(MpO0?-UEbDyX{W`jrG{PXz8IbWmnI%O5zC3gvA) z=~AfLld1U)jW%N`uDw~6l01i17X58uLsoPbK1&i)9}OHbCM*R+Hm9Aw2hNhc`{N;@ zr@*mezy4poK&b@)az`K%DX-y|`#_8mwc=;$%&H_*ZGNfQ+Pt?XuxjkbeS;&tjpf@` z#^=-CT58>K1Wc0`slxqrI!(=&13{Rc&0%8N-JZLNLuK;IacY64#xw;78Nfv@L=kr@ zDr0;cFWeFM5^o$SG9ye`1+bpD;d!yKkxOS6tY@4V7o=F70a%ke$?=ft>DlKy-ZsIS zUyU!1W-KZTIi@9@_g&N!lM8c7bXh^l&Ri)iS-*HOU~Y*?A6<)hs-78am>Q%ns6N1bBFa4wF^ILqr7mJ_YWh0JE0pY!jiB~C;jXOl||jYR$|#Z_O5_Z zxa2-R+1?WV%)Z61J98nNOnTEi#TKz)M^Fx65 zxbr9%;&A?nkJlpNMU{uzXF9L5Tsns{^iN2kZf7ihhxxI^vDa}wq5p!)Votf@RCt|Z zrjjP>2W?adb7fqIcUaiY3T5ti=4<1{~<4-&ff zA)GUvm93E@bL9b54}s5R!a8|p#|z6>cDEE>;4Pxk#bOJj|EbgP{^t349w!;Im5RH> zmjTNJo+|F)TOPm=5ZG+DaFRZ%X>Bz?el;9evOH93uTe%;SWdPVP&LP>pASgvwr-oR zQN(CJKpn6yO%8IlUygC_(UPdz--NVcifhrCm-yQ)w3GN85`~ZqlHxgHU~gpRyzl#y zaVkO-|LI;s>JI_UatqHD2_{_$xNhviTf<;E`O~Pt1xZBgaTjiG`-duWmG$CXn9O&@ zG&onvLx=Qii+QK*dgLNQ>-dFbjRsdFtjt(&v^zClk&uxI3km_B<`P2Ubrw zXQ1KpLfv4grrqe5RzkU5l^y?>r6pe~`UMA9g_pDF)~Bwc3`LxE_&C3H*G{UP&BS~7 zDCZboviJ4}G9<-k*Z6HwO-Q$ocGWzG&F92sx>U8)3rUhMsQl5ZrA#yKg4_OCKP(zLn6gG9E$v2bY#egrUZr83zuqKLkZ|H3+^bH&jqd6 zcuy2AZbLMm6&9iZimT!KAq|(DmA+Sv@B;-46Kr{kv}!Rgw|Z1TveUff2{(npl+0@oC;|@J=K7Ny(osHvS#_XxfSIUT~+BE zHr94$;n-q7kc3F)<*6s*l@eFV!tY;?^Mn$wE3PPmJd5Y)uEh<*v4y7@*pQ$x^9=8J z>3icAG}pPvT&)u`uj%cv=c``z;rq`AcDgi97(mHq@;!60Z_au5p=*jYY%nu51--*t z^Jk?{cxg?P9b9y|M?3JD7(d1xEiiQt9s{Wi6HHx#$q*C+a=ouz_HD#r-mxyxT?0&B zs)2Gla^Z{joWxjDV+ZeCQAa5>ewdKy#)sIx$g?PgH&}SVE9WzxCtX9(h;pV&okwQ7 zU~_!Aqs!?}@Ts}`U-+CbBT63W(PrJuk8Cx-Ik%T<(F-L-TP-z~xqJ2_YXj3Mdbz75 zJ7|9Ndc>XVn^Uh1gUh-KFEyf8;JDjOY88fUnkcgK#I5VKDZl}RR?0z)S&iGsj)#k? z+OQDwfTxRreyw1;2AZT%7v{T&W|w{wOXB%zo0XPRWqU=5jMhkj8@Yi!>X+wAL|J(c z^8zrr_=K|-_E=h#J6ch|@*t>a~@iSNG-DXp6XHTFIw|i=LK4H#W9f=ylP-ov)wZQ9WiGm64U5l0i z9fJxh0b*dQo|_l_+N`fNrZA3=rH*Gyx47R`m1{A|$1TW6%8y~}L{_Snf=y{3=hE&? 
zFZC@me6^i2*wgBRBpq?twCUuiiO-xGa>{Q`r@xZ4+m>n0+d6QP{c;M}&+X+jm*aZ~Ac59T9%A0fd4v{&M7T|K3P3f_s%{9_Fl zw)G$-w>*Vs*{_yM<3zqZ(7Vy|_lA%8C0Lu0>njX7yAC$kU(Bwx(H8UH+P3^O?nfbt zOTkekPbwdC#}MXKK&Ybk%>9)M*>l5?Fkh0JS3%P4b3F+&SchPlIo;d1gU!Qu2=}Ja zaL;#}sYYb#(vP_Iys=WH4ppf<0~fxj!A;&Zm6dI1MwJO69KP58Wzlhcsq|;k}nN=4u8+d+aC9!!?2e z#5%H=Zl=bJ%Zxz;3QMpfh~Mn~3~iu!9HK1+MuiMd0+@Z&*zZ!kRBuh{zD;1AhsckoYAW$eNb&b{8HyvF{y(896&Rk@a4bMbSoj!233N%tTu42Nn@U zlE))4^@i+so?U*F(rvQp+*SZ<`Bt@3+Tqt2ea_mQSA2(rY-x>}suf|>?n1`>9=ymf zlJYo`a!FTgs(_IRxej;vuj*3;`nXL|*P5E;J80WI& zV#{8QwiwPzUIDJr9@XP$Slpano1(|}rnr$ukZghK=?e?<=VlCkDyd8aS0IKP91n)W zR?OJVR{nS~%6{pj@*6Gx>z+OI1Z^j{vlaixiQRZVVlt;L9X0P#fT*p+NV@yN!@}F= zkm1kNP;D-6m6lc7ez2tYaVsw1+^im+NJs z7gc;a>`ut0Tz=+BK3dYEb=8Bq)TDht)!acjZC83Jf0KC`6yVN(ZkCqrF@}q}=|r zrkPaeB~=ppCF84~G*0g}t`zX74n4(^tJ+ViVfi+CxQvX&UvJXfC71C&fjCKiv-`?( z{yH1YTURNAg~y$}uKRAGIe9LX943Ff;3H>B#~uRBW#^1p;0TW(ySQzGk=VtsdPS9M z%r*&%_QHFU?kX=?LXV>41__s~tkoss-g|L=7I`#fXefVRcEyV@x*NZUf72VIKgPwo zvh!?6QJj|RO1nk_?(NKSrhN`JptnJVn$sj_WAB=`fZP*D7nt8(?K=Cw_S(VO$-Oqn zQ{G!>&ZOn`UtHYBxVn%%vZxC4Hm__;*)7o2Ao!KVS-8`Dai7dg@I2WYImlz9lLe66 zspl zDhbj@e+S&J-XP|HP>q|oKww?>&~bPH*SG`T$xd-rn}-ozIJEB>2GdS!p`8|+bnhs& z)dBI}0cRcdeQ)a&FvXndsS5j$a9d^(PNTc3MngFFpboeO&h*dMk74bu`MQ0_qAp>n z`-Gs7f9~|=zlm4>1t&!Rx1W)Bas?M8U(k|U=W;W%GTXxp9+w^M`m+%08E!V5=H(ry zFq-S8>~>>PI~|$A zupV^M0mVVQxBN4=(WU6|P4dVoLt3QWuy>%Q*GO>TfypjfK;%o@jn7X6?y1KyCo^tt z=#GYDo0bp;s#!m5lSdh#HSQoPB}W+7AEUiwOdlRoG+iF#n`M~bY+N)?qH)Zw;XoCq zFfd9>tqj}S8p5rZ%EC9^$==T6q3uR{xdA_8A6P2%VI^RbrIX)EOx8=wPU}wf1VzGo z5kQ`QIZTKWr~@KJ%5fnqkmm&Q0+1oaFJqnj^p*S(s0eaj2A{_V4QbPBs{6UDBo%0+ z;pgXwGKw!EdZh_3tYaZ`DIoo08A4BB?$SgH4SUW=&$4|mkye@gG1>KDa8L%*^WZ>3 z6LK*)C`feWS23sVC4*vfxD$?Xt#DAW1Rk)G9At@{nsaKT1zF~$wkp~#xy>;ZSDs^y z+o)&C=R&AFZ3*1wag-Hu-j7CTqeCRDO5#Fo=f|;O#?;?shZ zn4ko0|B;V1v;?u|Cl)S;WuHM-E-2@T7Eq}Y}$I0D(0YFMjhO}+~pmw zxc4r&LUM$sXy<)a?hUzcdi-f) zCU#B-1Bltb!R>TE+cB1nZ{0Mp2jv47H5Uvp=)C1soV%51-bt)}y)rnx zt&2uIa2C8c@X<;$hWWhNH+Ut;-qJqtRlL2W{4;0PshLL1ke+E{i5+HP_p-3_@p9Bj 
zG)6u?!)9^|R*?`ihO?V^Qw&eigqw)xlDwn{#WXp4{cXT7yajd=#tbbXe)iVG#~Hhj zO_58_t!M1YJ$z$b;dwO+SpoJb4&qDLk#Z4&R47HMA`_c_B8ntr9s>&dh{r6fo0Jy2-e7w*B!jl$yS2BX} z&bTP2XTCc8w}Sk*h0+i#Hy*@zT=*vDy@~)y+7DNBzquh;mfLzKT?-Jv&zWyCqwQ&X$dA@(d{%<>^c>bkkvKeiQ~yaC@7dN z1)O(5Z=j3BX8g)Ki=fZJaV0{GMb}b<{S1CO*d`j7EEhLet*FC)ohBGnSP_pt%P$6u z|BAlimRI78Fni$V9qzGzBVh0`!K@aWuSif_;yeB1?JH>yRy`)=3b!AWwM)+ayJese zPN7q~800KcGG~`v4i?sQ)N7cJ<~cJn%6|zYuR`E0Dn&1Z3mza0wv#ZKCh?A5%_Rvl zSC;XueB}U{q&K0Kt1MOFVWH%+bgO_yaxUbzwG_A z`plo4Yi*vL);y0k4`ir@zp&Gk!b1Sp-<2}Om1g7$C8cFx(Q*UGv)^?;VZ}|gTppO2 zJRqvLsVlrijo@~IUZIhA9FMY-ba}bC*OJc%2--yf2R635`uS`wvMj-)4peUP5xRcE z_~u-dKkaT8s%9!%`+~*^3Me}&I;vd068B1t&@<(DmMRWKGA2hSrcXY-rtm#M zhPy_3IHYEq!%_vw)V@fPV8H;UI6RhD2Kh#wF5 z=POq5=8yl70nt86$t&>YK9y@0Z`rHu`%JaHihRYxvbU0P%sIsKGB9TzK`i47xijPA za*#DV9?wIv6P*_u3_XxQ>-lf&S-u&YP?;!!oVrgsROj$$*%NxxzVOAXL+82|vj9j6G^B3LA#R?Pm_z zXu6TJ+b?K zw69OjcZ3^NSlDY{Pc6F+8^N_}3AG*`iAk_1{1~>es2k$<<77;7ZhxqDaA9;(T=<&x>N(hZy~O7>Lz9R6T#@?evW~9)!@dGvs3Uhe)P><}kXCCw;Ne zx#q}xfU`jFn6g+l|K{FT2aVA|nmNpc<@8t6+g<{tiGf`y(8IVolrn&zWgWVc@?e4J zN`As+u$2p%F1JkIkdaezWF#{5C=1rwPEXwgwz8jfHz^Y+A!+jACXxsFLfMTm=^8H( za?+NGSA60ee>^*zu8?}O?o)>#iKG*Z44{gAiJhjk65zllHaoz)G}=hPHthiTV61??x(sjYg1-aGF_d=l?#~GjK6+ v++!>OdyL=B?n|`v&;RJie-%;=vAEMwS!6SW$`bp>bx!FT>*O83cN*z literal 26919 zcmeIb2|Sd0|2OVTM3kjMh^Z8O`}h3r=l?&`)x0{_T;J_;eV6y={ds@C z=fWN*yLpP*iZU`X^XzwS-zOs@2bGbT^;}^VaOdSS{XrR-pDyg#eZUs@PZ=3=85tiL zb8{JU;QO+S4{#N3+_(|AJ9TgB<6~}q+1$s++z0qBG{1b={4(%Ob)5RZ932xJVK7G+ z@J)0~sPX~wG6yc;aUY+{mwhf5`dp^^6atqNcy#K)sZRnd0l0{;gen*jxOzkD?nE@) z!?rw10J4Jt7w~&R0+E3kWgqj)Q@H|F0KN+^7XlacG8MR_cS_z6 zVU9!?jFY5wW?QIC6&IZZ>$YYXzd-MUN)y*_MsL6*JbMGJgC4&3jCo; zrJbFf-MExqBCNM6p|^_I+bivrO8ffy1_uYn#>S+DKGLcLDfP0HS~&H1FHzcCB?TUu zss&I8kOA;-scK)S8}KUT1noS8kde`io%;JzV&AB@jLZ@l`|UOd!f&z4#LVdo~;Up7Rl~XSgQ2Dc{oOaBfYj_yZFFW zb|ky23%d1YXYr}rgWXPr3!E4~lit^@h);Zb7E0Xth~LOwy!iZFb26gI(}u8=R(*f` z25h_jo(j?4y-Uju6Qt_6*+GV+_pf#{v}osd5Y9|y+}Vm!*c4;Yde6wmyq>vZ6G;6n 
zSY7>{zpS8h`E2}DK}#r?hfg#{>^8ll_TU%EgOx!Y=JN7V9Z-CiQ`p0}l)8;2YVlCH zaXVYwc+^Gc{%1)Z1IO-BuB>2U&{X2ca7iJmaeer z?2Ph_b4$~gQ5FxMNG`e2Z<(Rfpm1P4_CRUr7*GIWMggN^Z-lErwcXPz^QvYIJ5ym( z><==A$!4u*it1d@a=3FLhN*SO|ERyUnx8WAse{MckhF$)KiSncAgdKQ#XJVqmhf|X zwNdELjS)M(T4BWSyE1KYkRN0qy6|0<_jc81L*;&G-Sbd2zn#dZ{g(H?G28ZDIk=XGYii3)w7G;=KEdcKhcRnvY#Z znLDG;eP{lEp3QfLE`S>QQzr35p5njyO8$a{{!NDduHA3sQ1;PvZ1(~;)qm1K|AK`s z00U?Ce?yP= zL2Jf5|J_8EufUoA|J)3e2u;@c(E|L>-`jWX|IUp6UESUhCj}Y|E`{-Emu-S7`Jd^sn5VNe`;WU*D=4F9`PWLV}Gtk{k8ST z6=(EgVEIpHufNto@3;{({~Tbu^E&{tqi0~@?^X*RoM+1q?f>1Ae+?qf+30rI^5idI=Q9I z0g|>?p1E%m7n;3Y_YO zEOln}z-yh%Ofb1X&|aUWh|^oE!IYa>ZB2a&Iyd()&N_nF`mupEK<-oG%*EEJGTmcl zocVo0$pd+X+ocFy+PT7L}$#R__u;ltX>V7t*IYmV6rmF_n$aQBkdA_k#6u zBmzl)ERh#J9v`IH$AJ4DFX_6`S4B$@Wj!WABo1>rsK%%9s)3s&FU zwv907A83Prh~T>z=p|+k&Bg%_qHcH^(gv)3I7CH-kvO))aJX67|Mx zq$O9btzBNghBvd>&aOn$ES0zV6!Ym*@>C&{%ij@`?An}{1-2vKVSYn2d54e~T}e9! zkJpD<^T|I-!>LtK86@X!vS;UwSmDwh?ZfdPN?5@OG1q~OU-Dfc!*V4=h1qPtZ`Eck zrO3|k%HI2_;0$JXlXrHY6*l4inBzFsbMleFh|Ogya}z#|%?Ay7Ajg@wGM@LXE6v2r zXEyCvA8v|0@Fz<6OqI-djh`rzKT*_YcHvZhEOfyPmtk*)mF&&uGZ6S+G{WCioaV>; zZN)E4-Slq%f448j$)eWJ0bQF;vzg19yD%cJ%viiWv3>`X@)_0Xi5nd5OrM87_w8#S z$EK7E3vr(3rn8QQltW{dZa+-28CcpaGx!_@&d2F)E~$~R{Z7G-AGz8WdnfCqBIh`f z*2xf`?}^Mi^p^2-qg9j(JA6aF_tQPw(h3BdK-ptNFFB2MVU7!_Tkw@b`z-7T7bi15 z_9iE(lLAWzwA+$B$y?R@c7ID(mJU5(s4KqD_`dAY4LI|K7oo?e5x~X6x43rK zb9NI@G0P}7zNMKTP#D(~Lxg6|4KtbS(fMpDa>btPnMEI9j=z!1iZo7~QAS^1@fzGk z>%I#!?sFb7u1ftAXbK1J%&e_3dZ_paPI39n>H=B$<2?7>R@ij^8%p*;U`LVHj&za+ z^WKaUhUynAi9?=*IQ4(zR1O00V8^Zge7hpJi!@^FLsktVs!$^Nf+tO5gPxXbez+o4 z30VEWk(t51c#JA@#~n>^Higx_>KnkYi(p6GNno3JbxaIz!1;Viz{b!9#@)#8{NP@) zC0%;xKY9R1Zw>%2Pdy;V#$Mw(xtextu@X;Y)BQNNcFWy_83EvoEN1UWbJ^5J1>-yq z{-cFVu^O|i*=-vxM`gPgQCid?YG9e&xBwlmm5WA>H%l!jjK-Cea$8@qE0Y)Ww8ru4 zm=c_lMX_s$I$mV3!}vJ-y_a6DYi`sQO6TYRUHm4Jb-QQpy~YXy0O^*6!H{9&FJE$> z_^hEMc3QhRo|&RE@P1nQ{?b<}}9HxXP} zc`5!UVd|~LCcdVf0eU%O1vf_?OP~J&5MD0&Px}|Qs$2#=B0OeJN74&)uGObc; z2P2An*eU?(bZ)r!8Fg4^{b}r3S{)Tf$R{5~r 
zmSNoxUdSb=F}S}@|7^fUbwAdV+SV!~8W$fZ09k1Xcz3PqhTu;pOHe+U)Hg}a9jBUv zBSFzyinFLUOsb{J#I|F2Him7X{cb9L5mPksHN@jZbD4;5F1eKWvph(*mD~H+`gUvQ ziSbZxdR?7i0W_B14L7wTAk4q@tvOC|=H;973@Nd(V);bstSX5?YA(^ZB-8DzT~Q^- zDq;AA<`0o7S0NV;3j@K z+jJdsB>I$%(=l&TyF7|mg4|fF!s$2~q_dxWg3-mxlo-@v2vP|ztIUQ|Fpx&!SyKQ9 zqY#FC)Fq4rR63l+4mMf!^nN}!UU&aT^L(bHq3V-tBJY}r(O3SxRq)fGaV^*^~me--z+ zt)_I$scz!j5vkk#X_yM{o(j(WPcHe>9G$-r^0zU~zx|9aVa>lfBtBupf7y!mSFM(O z)dGAQ*8Ts!=T^2xomp|U_q*VCJP$x&alH}ucg{d`@BP?{vTaFSxkhR28PkE!>Qni? z>dfbnO1&u;@`Y3QB!cAr>g)RYWuT`WCN!k79kMo#`HdgBO?^2YR(x?fR?o1)L$`%W zcC4zuvi-Zlo%Jc4pvGmC4q*h%IxVlp|ph2 z0*8;*3L*OS=~5BncHbqCxCtJ0{I`{HX5T0po*lvQI`XBZ)kd-d=#BNql)s6#4?7X| zM*v^s0fz&Ado>VM|4>w%2B5I(Y3Nmy)$$JOrq@9ri>hn7b5*wO1a8Nnw@bfKIRLSsYDB%w{$vdnauFniWP4WOcaAtnp+btTRh8-ka@`TrUm~i_3CzTunVA;L6=6 zLjCuV_yW{;ZYMZ-3M$&!;-P?>@l>|0H{g$EpmJqV`4?pcu<^clw$TFS>AXn?$eO(b z?TG6ufmTJ|BGtG+>b^Mfs6<&ouixhPoiAVRx@kz%f!|7t61gHap`7l6Jho-kdZk+~ z`Lw($awASgTwhHwxbjDZ%OIfaj*}{Di>y^K*^*AwxLpzBm-`^AtR>g%DkbWlaE|Sm zh=G+DUVtcR_@(~CY#6{{?u%ZR>HwKu3r7=n`yu7Qf2bug^;C`D5J34#&=-fFPa(53Xu3PMk5#% zk!fXhC~kD(9`ameV$Zg^9R#tXlX7ZO2HZr&ZSt^Vcng+L{qkk1K5oh-0Gz@PzS&l? 
z+OjyGPkn@QO(7(7ylg<`v`8b8RZK3&3c7i{bSR;6TaR^#2VRv0e<~4N!&+d*hEE~x z=Uc^CH?et45!w7CV~1{Lnm{+S6zV zckx21EH;(TDtG}e&0D&ZlDYJcbd?6aOG!1`s2n-e1`f*}bL$fb<-T<)x@M7H+LTp< zrb>Fj(t)7R9=2WJ9OW{S{>X#y6a<0Flwi9*Io_^QbQslsN{hs4@`&jS&#swRgjhnk z;sFq?3vP-XO}yNEhKC~eDTRcCqwwgYjS6QfNPv!M2K_e$Zr67#V2%`K^zcjs-(uTY zxP(MMHNUoL;FCWc)&6$GJ6Ox}M;NPoKJInV)AR^s&I6Ln0qoX?!o~Er2!hoxT}Rfu zXqxcK>jKn2RM#0M8@PrlUkGA>bM&|!k$h9Lp4E|gHsT!d1L;~>^Ni_f0Nh%Xg*HtR zLea@cAQ66Oal7Pw=Uo8bc6NFen4T|pr7dXdkVlP|4R_RYXEizAW;;n1UHB2niHS+y zj^c*g;vKiGxKPj18SYqd`$&f4?DXyC!DWt)IW3< znTbAOTYtJC5eKcGSSw1dOpk%H@zO04#GN17{oey}C*S@Ja{s1*|8^Drb`_=tX#Vdq z)qmP;-L-&O@xq-GdT<)hisvb+G55iZc&n$O%?Aw~X-XkkrCxz!shy%zVMdRp!^*uG zWo7Dq1GTmwe7B2B4EG#oO#nX|vi!R;X>&F`yMg$b)-PwiW!37*>Q@%`rpbhNpxulq z4yO=0u~4F{Pjg%TjB2?pr|jI7td4ge%(EOgxcuF*Zv|x9tQ5HcRmSVnt~llL#)~u4 zn&56eYDKm;&B>3WKbZNW1`hSA_;h=dQhrTeN2liW7CEn?blqqict)F(pC0EwGpQmj z?exuYEmfvLp6%gpVffzL^?jguoiBG;Zn6*>Pz*M9w0zB&w2;{_P}^F94i6uX62JNa zXmk0L>VXd_sZK=?J~KGO+XV6`~0Qi4%R~|Sqwyr8lDc^z6@Wo0csyMw|Z6GQND?N=8j2PA#n%~je!0Nc~tGKEC&gW>SpE@%ja2X}8 z?-G(%=cS=eIxz1Vbuvfp3ndG|Fnfxc--vc0JAwl%$i#Y_bqP(+PA}V&C>aMZ?8H-b zKX}sSiSEI9;ia~dpfbW1HD*jJ`B3n9TV4L7id z6k6#ZPdygj zZ#y)|3YKU9)`Q8Jx10c3r|AJw(#b-7d#Nj)KBebc?yj^G>iZG4HT+bNG4oRVNLKhdVju-*VU^Jjhi-Q8^j z=ML8JEE??HnZevlQ~RZqyy+yTb>#g1rfPk|+RBTlM9Toko8kxvb}%1}?u?xOkZdm> z75EhJ9j9+lU{5UDCqBySx=oK}u>+LirOp&*+Jj?~2VWU& za9Rl)u0e-O>F@yUb*|g`!V;kBU*kfiPwUN3xM$Vkpr(5r@(%(~;oD4~hpgJD$=sfi zyzt}`0Cpf`vw)3JpZWUVOoHj}0aWL-yrd;_W}(`}OUdvU&$2E6=6fS~34k z@%))BA-6R8PW@UA{F=gM#5L_H#_R8S^))u>Yw3$x>T~}i?-0S|UE`gZHMX}Rox?1B z#Ueomw||#47=u0O&r^3h3r(F%U*|U6g6wit`@Hn407;rJFA}y_>>yB=P*#5u97WOb zw~`%Bf>-g&vHSgZVGXdZSP_FtEq+2K~5-Dsk1K>qrc)xE($sa6GV1~sj zGly;E&*a(+ELTOEGE@Ax0r2u;MV`D}DN50C5DIwho8^b2W0E)4ndJ{6|Wg z4h!8LES0BK>%FXRWX2pQ;gi>z(asxfBdDn8OsN3|9rb9#fv7^DIk}t9*2gQ`e5QHh zd1)TFG)(CQ=r&%!#QgZ-QRAA35PekTCmNO0D>y&OD%e1NhwpbskFy65ghW`8c0j=d z06Wj;e}$dfMtmG(QJ#R*CRl4Cg5Aj9wk?k7!~;!4#tgHJYbJsGil+EeR4_{BknU+$ 
zU-QnPv~YhQCQB@JmBO#(HvK9wu=DRsc%H#h^XmYpKKC(|s9V_=V=ks7`8p6BRhj!Z zFu9VFNzc%QW>JRZs94E>xY1DhF0J445E%nj904={tM_5Ha;o>PdsAVsKS(<0Y?4Cl zdm$n&?>u6+Z9^il_7(!UJ=H_{);{?&4m}ka6yC09q1RlQQm`%-6qrU?d@cvr(W)J| zVRmi|ss~^DS|Nm;=bvI3*UGKG6;k{wbZ|jRQsY#lpS|zYqeGCuC6pwA$$k3z{L_q} zB(;fZh@`1|;B{xBr$-Qzri-()t<_=sa@L6^|130}|0~~-}`R7dGg&^JbG0sR(AjOa!#>c@-PgZc72Mt{S z+LWY?H-r)nO;c}m#5yl4Gmq%KJ9%&~`${a!&dij}faAm9>{RDvT+{yNT%F0Jrmp>t z3Y^ku3~58%P4u%H5dOj z*jilsH*B3QH~Ab8{9EJsAE5F4)6Cg_0^l2ArW8M|a?9H56Y zU-m6k5Dz6)rmb(j2MNBq z0mGgI$2to+)uH5?;p>+FfVM$~!jy%~N%lLwErI%0eHAEl`?pJ}Fl9bdEB4m$>UF>M z+*-T9?*{bFclmE6B+V)uUtx%FfBxYmdcG|oVcG%?0k%>--eP(+$s(tc_q^VEGs>GJ zimY0qkbm)uH||V?YF^B9z=5(qUR(LCgAk=Hhtfh6t~x-#Hio3RduPR3Aw}G!5z){HO9(XvN+d<&s#2pC)JpJ9Zxc~gL_ZOw%IZp@QPEL zq5^1EmZcG7$;Yp|qR>o003e}Lr&U|ifKq+BX{FJ;Rfqx*YXi+qQ+itCGg~_A^ZqDm zrH?{LUWD}k?CG+7RH&JdY(yXFZ$V>~W3UlnI<88G*m_0>nF5h0|a{z)JSc-jj>d&!}O zyZh4o{N|t`77(utK-pVnoC}M@JSq&ejzY6ab3=U1n>$1#!E$;Am34*DFihj4lYa;23+ELj$8k~F*0;^9eto2@McjyohdGyS zE>V21z0`+m(R@cbpSW?Nv6>I#Zd7{f0c%z8c|vtX8Yptg4Bw z7}bJE4}Y&P79Jcm_RK2GGAD7P{}E-mHp|;)b3Mp5WIzNdlNOKwwLjTfu(`^>ZRBN| z@OR#JI{kN_WJ7oXa@FQGV!DYc(;lOB#<|Oh zr&k`6{`o8tb_F0*6%~R!LJ#RYHcJ@N83DAajjb2>cQpXLt#x<_;L4Du^PL$0A3WST z(tT9ySO04zE0Ck2QUuZscqi`+NLm@w^!7~gLciRg(gEUVzF_S>p`TPC2vUM>o zt$8Ng6ddqIE-$u6k|LK=4DE=?Q6`)KK&FaaMV8+|2&7L}h|(1Eqr5Pd#cKH^Bka>M z>&`NH$biRFuxAWAaD&{qM%47vO*%$_ApUTA)U%7jv&tqr`n!}HKKvSeBu!5*XP%&! 
zHf&8{8LXRXE52b`wfNmhJ9C}%%uZUnzDM^S74EI8)fqdINuvtsNjm`z#)yALts3*z z{}zdo^VV$whQvHsl%RVmQjhgjl8AJAzE9(TYuhkbZ#n0`w8JV&As-2w+F^z5UP3uJ z-C{pvPuQ_gQ`Ume>8FfoLe-@|DEr8a3iceB)%3 zJZ?DRqPP}_ogdT594D%PXyXfosV=ZDsbxzTt9Yiw_PV&hTXngBaJYW|5ZtvGuL;x@GRPoL%H5;4Z>KE-{5Cc%y$(xa6Z3~Fffe!|DL!(+hqeG92T>{0Y&y^0E4 zqgjwaC7P!TQ?AEBZBJsM1L^hZeSR(kXlLN{-{aZCC0CRf&-z98Hee5>l@_da1~^zX zrYFM>5Pmi$I*6JM=bq1S!GWx%&~ScR-z#wjcjWP~hzmlZt`$gXgzF$bvjSwZ02wJt z3sU0IHM)lz^|Z90)LI#pCtwRG(pnFGaK+-O@PDJE?zig$2XicOg+g~dAkAri!tuuX z>KojjUJ=394FSsVi1^v{b5^NyF1`_fbI5Dhim7cdUBC za=fVSccJ;%Kx3briG<0kl^z%G7+f>9CPCrA)mli9oCM~w>L9^3ZS%vi?xUv}dQQa7 z-%|NXq803%rPW9?&^1}y@Y+i6!l?Yiv~b^MM|Vpu#7$0eHT{t*pcoMIi~9HhOZM2K zK+0Bmv`Ha4%-{)A;=awlwyqdxnM4(e-POT=($R=P=F0?m}M7 zbBFz8W|*5Y>V}=Q|6QfRi3y$5X35YBZYpGsOI0WJ)vaL2y$p7up zwXhM9z@yE$yhRaF7Lvo)^D-VxE_x;3a9Ba^W_jBRxwi86Jzi0WvZwXo_mnd1RA4&< z9^BW0eGFY6x(z?^{r%XSib}|qMOGSiepy`djplMr#YqdvrDkh=VArw@;a;x|^4BL} zN7lu2v0i$Xo@890Mnzjj#o@LXsB`LO2`lb--Ht@Pp?FT$yUJhk#(vr4M6Mv^NuE$r zPKl!>+mXWvu`|=#*>MRaZZ_`vRZ^-qRqvEh7nY+1oX zY`XcDa2Vnwsa(FRjeZj)4n#XYr*%ARnYh=+o_(p`pOX^kt6QRrS__=ImEjqi7iedl ziFXv#lProooA`D2V^YxmBl!cBuK3##!&LjYGQ)f=eC5gU8bMPVY5qXwfg>J7qYW71)I*gTQw)xLGd+bd90pHuX?UJP+g>)zaqRuj@wbXx&= zF17WcUd&v60i7^aM<^`gejw`uAD%SjnNXT&bFbmi5kbUN1 zv*#L0wEv*~jl}YXk8G3s5*k0%+ojA!d>YZTy!1LG(zLZhXZ^AFxueO{=AAcEr6Kc& zlhC>2L^k;MvIO73^MjdI(hP2Qn`Y|pYZiyZhOkQ!1Y;-DYTkzj4ZAr09J|gpQ!0SUU`7)l0p9cs$H%!bJcf`zA8J?rw!G)WCD_y*F)S_eltbDv9I`bP_;5syz- zaVrMr?t>si<~^;4Khkd$3Nj5B71vBe_$uU=^bqg?kL@?KK5G~-6b7f8(K%_4?UCW) ze)7aGjcjgIPa+Acn)OpIzm19Qw7^DnB;Moz-O!#p=&8HtfSrC5d_uq%O~PH|wL6D8 z-nn?Wq;Jnsfr4{8X!CU%CY zFE4ZX;+}F;w0x88IWV_@p@(x?l+j#uLl+e=2IDzO9<+vu%##2s6~4NE<+MfP-ZtM` zt0?sn^Oogl0Z+G~V#`m*WgPDb)RXeD53K9VHqh`+gdXf7y$}#>_y)c299ePnQ$=d* zk&_>>S{@O)w95yu=Mmx~(t91LPYjBoXym%U+!y?EcDk{ja(F6Htv-~kQ?KEdJu&Av zM_Vgz8)VUuXMFRfSG^2usaC)UP#uH9Q{}hv8?le2-bNJ0O(j&L<@`br$L=c1&5P=4tD*g}QO?|KiIc2(WJikTm8(FB zbcK-M^QaG@w}ug=Y;2Y^$=rcp3@^0YM#!K~6;gU>#Rhj*GUn)-=!Sie{RG`*3;aSz 
zN5$n&VD#NM6J)aUqT5gIznxbi_*lVQ-qqD{)~983aJNq*7zgYbXKF$;#kQYP<$PZ7kD*2(jR^yWz_!e z9Q~g^+$gqjYi!rb9_|=yKbe^PIv0UM5RO`GtivVnYfmHvJ*`&OQrcuLZ+?|ZD}P_I z#+8=tU2+~OWPB}SRjc1qtGtLJ(vj1MR1{vGR#6)xtL+g3ZaIPYsp!xz3s&J*Q1~h( zq!Y6*LEkM!MCZUnk{8S+5BTORmPcc=eeMh7BYAn2%fuc+52IF<>6aAw^k~KcT!I&B zHHB;ZlAbqG`;eY3Lp@zTpUF!xPJ?Ym@jef!YC*E7E%`&-kXW;jkHPYy>4o=MpkeFZ zBN)dR&Vk$1dkzr$W^X)0NhDZUaVe8DT^ zN(SGORW>bT$_>w|HL*9)F*R573j#AISfQbE#PS`I3C&vL7p??kJw(3E$+BF9^A^0h zUmmBC#Oib|w-!VN$)L)_?3kPvNVrIlzFoVm7lxR3o4@M%E(W(U?52vAhmhB!A~0^M z53@~N6y3aNqbWX=DKDR-@^bEu0a-H?el(!$3Mp~2H2kJgh^SNxG>_8Vp}pB$jd^1M zu#LSobC`%x^6E5pSFc;>$2Fr2NV+xcj8U(@3?IH4W0*HV5_>nE7++ckY$D(2v^xQ! z)VZ2HDP3i@m^x8%v@jiQ-m=qsWl?|GmHn@KH}81Mvb%hkL(khZrxzG^v>9L=n*m;LcxxtfG;KZXM)5%l31LfvG$ii z^PYW9>W6klh~0u`V=erIOX~*es`?GD1mk7{m~dO64(5Q=iya^_sP6$sp~NgidZ3C0 zaidamk#Vz7x_zJ%vgMcEUdOA3%&B)W4ZF`>8+H^uYtEBu?-vFaLu2bxS5O)Z`a85Y zX)APIaN@=udGytxZ4yLY zeVzyAHGe^poDJdb8{VDzR{kmWromb|BLb!&UQx27K-8GYvc)78>z2AU1cea!qn%eX^?}pLhL;Q-84HmGPJPxri*ZOx zSh7d9B>FMDx={@{>JX{=!_0(NEK=*&VFeo@X=Sxw5emkA*J={ZN-X#8@-qLNCska9 zmvlx=ytSM)ypu3^(DQbkP5|>q=OEbIt1CYg#L{} z70dlL1mTsP1QyOyj460QLe}}?^DE~y6(LFM*c@UivXzyz=I8SEs7^@j7Jh^k&b4Sx zKFgiGadk~uyShPQX-ib6b#UepLi8Mxn^toXb2Oj2R?W}boEC(huzUpnspn+T zA(UD_QYyygB$n&M!8{B2p~8y*7Nb$|!(L}XA<3G4LiSO%-0Bem1#Y{BGDvGCOJBJ5 zPWqdooYPvdU9F11NR+iBh_QA4Pug{LLpB#i?D6oQYdUCHTs2`_we{Rka%bU zw4YdAy@E5UkDQ%=-`yf%-z}HBlu7fs0)?Ea=jR}ij9-1c0?4pB%P@u#JEW2^XjmoN zHr2d7ip#3AlLUyTfI7gmqb(>^*x}{Mwky z49Ul6&jQ0dddM90^X_BVxUTvM9_LsA-L}VrIs=o0`AP;S?DMu#(APD=3^4v3y!K zFbnMrQjf}krXD%{AwqH_b)a{VktXvI;}z1+m0uCi*X-+^=9 zxjqq!Hl`?q?0NQ1pCT%#v?C-H+Rd}ppXy&YXsS+A5&OPr{RO#_I%DZ=lMgkSuk|TU zpsDxL^7ag5G%d)ttT3#96~W+K&y=wM9x1M>~vZfFL(!x*!9+_ZE3S~KV-TgO-;832G7A+Z((`_ms3eE+M{TtdoXW;5!VJwKAPMg z-oWF@wGl_k+=hyR?Qtd#e0-g}+dPgqds;kGgtQpD#~51d_9C#sA#SZCQ~UV6CT`tW z;ft<=!y;1e1`vx3MS__;bzwHg^YjI1GvuRj6LKO! 
z_HgH~dueyk;!RiSP zu7$bJn28rGC3F;7GEo5>_G(LRFl;x>rC~Iz(NmMkN?=l%-{}*9JoTPMo-BJ=U}BXG zOD9cULazmkk*dhy)2x0=pR8c6{~3E?EoJnqN@1mi7{$g5{n`R^=(#`B>?QLDnnYHV z3MC`el~-ahByhhNyNnAzEn=oNap|mdJw>T@V-MIBtNF3GK5G4gLTvqmY&r-(S=wcC zUyy6!QdD5toQ}Hg08UW|vFsL%Fqb`y^$c-RWk!7hHJ0;0 z7*Tv{@)Nax+z^X34Y2f0O2B*m1a~Cx_!_5?I(IoDm|^je2b?3UjnamseD6N Wvus3#$4439V{hxUJ!k7LXZ{z7k}JLd diff --git a/docs/articles/index.html b/docs/articles/index.html index 02b446ee..57da0027 100644 --- a/docs/articles/index.html +++ b/docs/articles/index.html @@ -21,11 +21,9 @@ - - - + @@ -36,7 +34,6 @@ - @@ -61,7 +58,7 @@ sjstats - 0.14.3.9000 + 0.15.0

    diff --git a/docs/articles/mixedmodels-statistics.html b/docs/articles/mixedmodels-statistics.html index 090b0611..4e08833c 100644 --- a/docs/articles/mixedmodels-statistics.html +++ b/docs/articles/mixedmodels-statistics.html @@ -30,7 +30,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -89,7 +89,7 @@

    Statistics for Mixed Effects Models

    Daniel Lüdecke

    -

    2018-05-24

    +

    2018-06-06

    Source: vignettes/mixedmodels-statistics.Rmd @@ -118,13 +118,16 @@

  • r2()
  • Befor we start, we fit a simple linear mixed model:

    -
    library(sjstats)
    -library(lme4)
    -# load sample data
    -data(sleepstudy)
    -
    -# fit linear mixed model
    -m <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy)
    +

    Sample Size Calculation for Mixed Models

    @@ -133,40 +136,40 @@

    Design Effect for Two-Level Mixed Models

    deff() computes this design effect for linear mixed models with two-level design. It requires the approximated average number of observations per grouping cluster (i.e. level-2 unit) and the assumed intraclass correlation coefficient (ICC) for the multilevel-model. Typically, the minimum assumed value for the ICC is 0.05.

    -
    # Design effect for two-level model with 30 observations per
    -# cluster group (level-2 unit) and an assumed intraclass
    -# correlation coefficient of 0.05.
    -deff(n = 30)
    -#> [1] 2.45
    -
    -# Design effect for two-level model with 24 observation per cluster
    -# group and an assumed intraclass correlation coefficient of 0.2.
    -deff(n = 24, icc = 0.2)
    -#> [1] 5.6
    +

    Calculating the Sample Size for Linear Mixed Models

    smpsize_lmm() combines the functions for power calculation from the pwr-package and design effect deff(). It computes an approximated sample size for linear mixed models (two-level-designs), based on power-calculation for standard design and adjusted for design effect for 2-level-designs.

    -
    # Sample size for multilevel model with 30 cluster groups and a small to
    -# medium effect size (Cohen's d) of 0.3. 27 subjects per cluster and
    -# hence a total sample size of about 802 observations is needed.
    -smpsize_lmm(eff.size = .3, k = 30)
    -#> $`Subjects per Cluster`
    -#> [1] 27
    -#> 
    -#> $`Total Sample Size`
    -#> [1] 802
    -
    -# Sample size for multilevel model with 20 cluster groups and a medium
    -# to large effect size for linear models of 0.2. Five subjects per cluster and
    -# hence a total sample size of about 107 observations is needed.
    -smpsize_lmm(eff.size = .2, df.n = 5, k = 20, power = .9)
    -#> $`Subjects per Cluster`
    -#> [1] 5
    -#> 
    -#> $`Total Sample Size`
    -#> [1] 107
    +

    There are more ways to perform power calculations for multilevel models, however, most of these require very detailed knowledge about the sample characteristics and performing simulation studys. smpsize_lmm() is a more pragmatic alternative to these approaches.

    @@ -175,58 +178,54 @@

    Trouble Shooting

    Sometimes, when fitting mixed models, covergence warnings or warnings about singularity may come up (see details on these issues in this FAQ). These warnings may arise due to the strict tresholds and / or may be safely ignored. converge_ok() and is_singular() may help to check whether such a warning is problematic or not.

    converge_ok() provides an alternative convergence test for merMod-objects (with a less strict treshold, as suggested by one of the lme4-package authors), while is_singular() can be used in case of post-fitting convergence warnings, such as warnings about negative eigenvalues of the Hessian. In both cases, if the function returns TRUE, these warnings can most likely be ignored.

    -
    converge_ok(m)
    -#> 1.79669205387819e-07 
    -#>                 TRUE
    -
    -is_singular(m)
    -#> [1] FALSE
    +

    Rescale model weights for complex samples

    Most functions to fit multilevel and mixed effects models only allow to specify frequency weights, but not design (i.e. sampling or probability) weights, which should be used when analyzing complex samples and survey data.

    scale_weights() implements an algorithm proposed by Aaparouhov (2006) and Carle (2009) to rescale design weights in survey data to account for the grouping structure of multilevel models, which then can be used for multilevel modelling.

    -

    To calculate a weight-vector that can be used in multilevel models, scale_weights() needs that data frame with survey data as x-argument. This data frame should contain 1) a cluster ID (argument cluster.id), which represents the strata of the survey data (the level-2-cluster variable) and 2) the probability weights (argument pweight), which represents the design or sampling weights of the survey data (level-1-weight).

    +

    To calculate a weight-vector that can be used in multilevel models, scale_weights() needs the data frame with survey data as x-argument. This data frame should contain 1) a cluster ID (argument cluster.id), which represents the strata of the survey data (the level-2-cluster variable) and 2) the probability weights (argument pweight), which represents the design or sampling weights of the survey data (level-1-weight).

    scale_weights() then returns the original data frame, including two new variables: svywght_a, where the sample weights pweight are adjusted by a factor that represents the proportion of cluster size divided by the sum of sampling weights within each cluster. The adjustment factor for svywght_b is the sum of sample weights within each cluster devided by the sum of squared sample weights within each cluster (see Carle (2009), Appendix B, for details).

    -
    data(nhanes_sample)
    -scale_weights(nhanes_sample, SDMVSTRA, WTINT2YR)
    -#> # A tibble: 2,992 x 9
    -#>    total   age RIAGENDR RIDRETH1 SDMVPSU SDMVSTRA WTINT2YR svywght_a svywght_b
    -#>    <dbl> <dbl>    <dbl>    <dbl>   <dbl>    <dbl>    <dbl>     <dbl>     <dbl>
    -#>  1     1  2.2         1        3       2       31   97594.     1.57      1.20 
    -#>  2     7  2.08        2        3       1       29   39599.     0.623     0.525
    -#>  3     3  1.48        2        1       2       42   26620.     0.898     0.544
    -#>  4     4  1.32        2        4       2       33   34999.     0.708     0.550
    -#>  5     1  2           2        1       1       41   14746.     0.422     0.312
    -#>  6     6  2.2         2        4       1       38   28232.     0.688     0.516
    -#>  7   350  1.6         1        3       2       33   93162.     1.89      1.46 
    -#>  8    NA  1.48        2        3       1       29   82276.     1.29      1.09 
    -#>  9     3  2.28        2        4       1       41   24726.     0.707     0.523
    -#> 10    30  0.84        1        3       2       35   39895.     0.760     0.594
    -#> # ... with 2,982 more rows
    +

    P-Values

    -

    For linear mixed models, the summary() in lme4 does not report p-values. Other objects from regression functions are not consistent in their output structure when reporting p-values. p_value() aims at returning a standardized (“tidy”) output for any regression model. The return value is always a data frame (a tibble) with three columns: term, p.value and std.error, which represent the name, p-value and standard error for each term.

    +

    For linear mixed models, the summary() in lme4 does not report p-values. Other objects from regression functions are not consistent in their output structure when reporting p-values. p_value() aims at returning a standardized (“tidy”) output for any regression model. The return value is always a data frame with three columns: term, p.value and std.error, which represent the name, p-value and standard error for each term.

    For linear mixed models, the approximation of p-values are more precise when p.kr = TRUE, based on conditional F-tests with Kenward-Roger approximation for the degrees of freedom (calling pbkrtest::get_Lb_ddf()).

    -
    # Using the t-statistics for calculating the p-value
    -p_value(m)
    -#> # A tibble: 2 x 3
    -#>   term          p.value std.error
    -#>   <chr>           <dbl>     <dbl>
    -#> 1 (Intercept) 4.50e-297      6.82
    -#> 2 Days        1.27e- 11      1.55
    -
    -# p-values based on conditional F-tests with 
    -# Kenward-Roger approximation for the degrees of freedom
    -p_value(m, p.kr = TRUE)
    -#> # A tibble: 2 x 3
    -#>   term         p.value std.error
    -#>   <chr>          <dbl>     <dbl>
    -#> 1 (Intercept) 1.17e-17      6.82
    -#> 2 Days        3.26e- 6      1.55
    +

    @@ -245,56 +244,52 @@

    rho.01: Random-Intercept-Slope-correlation

    You can access on of these values with get_re_var(), or all of them with re_var():

    -
    # get residual variance
    -get_re_var(m, "sigma_2")
    -#> [1] 654.941
    -
    -# get all random effect variances
    -re_var(m)
    -#>       Within-group-variance:  654.941
    -#>      Between-group-variance:  612.090 (Subject)
    -#>       Random-slope-variance:   35.072 (Subject.Days)
    -#>  Slope-Intercept-covariance:    9.604 (Subject)
    -#> Slope-Intercept-correlation:    0.066 (Subject)
    +

    Intraclass-Correlation Coefficient

    The components of the random effect variances are of interest when calculating the intraclass-correlation coefficient, ICC. The ICC is calculated by dividing the between-group-variance (random intercept variance) by the total variance (i.e. sum of between-group-variance and within-group (residual) variance). The ICC can be interpreted as “the proportion of the variance explained by the grouping structure in the population” (Hox 2002: 15).

    Usually, the ICC is calculated for the null model (“unconditional model”). However, according to Raudenbush and Bryk (2002) or Rabe-Hesketh and Skrondal (2012) it is also feasible to compute the ICC for full models with covariates (“conditional models”) and compare how much a level-2 variable explains the portion of variation in the grouping structure (random intercept).

    -

    The ICC for mixed models can be computed with icc(). Caution: For random-slope-intercept models, the ICC would differ at each unit of the predictors. Hence, the ICC for these kind of models cannot be understood simply as proportion of variance (see Goldstein et al. 2010). For convenience reasons, as the icc() function is also used to extract the different random effects variances (see re_var() above), the ICC for random-slope-intercept-models is reported nonetheless, but it is usually no meaningful summary of the proportion of variances.

    -
    icc(m)
    -#> Caution! ICC for random-slope-intercept models usually not meaningful. See 'Note' in `?icc`.
    -#> 
    -#> Linear mixed model
    -#>  Family: gaussian (identity)
    -#> Formula: Reaction ~ Days + (Days | Subject)
    -#> 
    -#>   ICC (Subject): 0.483090
    +

    The ICC for mixed models can be computed with icc(). Caution: For random-slope-intercept models, the ICC would differ at each unit of the predictors. Hence, the ICC for these kind of models cannot be understood simply as proportion of variance (see Goldstein et al. 2010). For convenience reasons, as the icc() function is also used to extract the different random effects variances (see re_var() above), the ICC for random-slope-intercept-models is reported nonetheless, but it is usually no meaningful summary of the proportion of variances.

    +

    You can also compute ICCs for multiple models at once:

    -
    # fit 2nd model
    -sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE)
    -m2 <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy)
    -
    -# calculate ICC for both models
    -icc(m, m2)
    -#> Caution! ICC for random-slope-intercept models usually not meaningful. See 'Note' in `?icc`.
    -#> [[1]]
    -#> 
    -#> Linear mixed model
    -#>  Family: gaussian (identity)
    -#> Formula: Reaction ~ Days + (Days | Subject)
    -#> 
    -#>   ICC (Subject): 0.483090
    -#> 
    -#> [[2]]
    -#> 
    -#> Linear mixed model
    -#>  Family: gaussian (identity)
    -#> Formula: Reaction ~ Days + (1 | mygrp) + (1 | Subject)
    -#> 
    -#>     ICC (mygrp): 0.058094
    -#>   ICC (Subject): 0.606465
    +
    diff --git a/docs/authors.html b/docs/authors.html index 34780b11..e0ea84a7 100644 --- a/docs/authors.html +++ b/docs/authors.html @@ -21,11 +21,9 @@ - - - + @@ -36,7 +34,6 @@ - @@ -61,7 +58,7 @@ sjstats - 0.14.3.9000 + 0.15.0
    @@ -123,13 +120,13 @@

    Citation

    Lüdecke D (2018). sjstats: Statistical Functions for Regression Models. -R package version 0.14.3.9000, https://CRAN.R-project.org/package=sjstats. +R package version 0.15.0, https://CRAN.R-project.org/package=sjstats.

    @Manual{,
       title = {sjstats: Statistical Functions for Regression Models},
       author = {Daniel Lüdecke},
       year = {2018},
    -  note = {R package version 0.14.3.9000},
    +  note = {R package version 0.15.0},
       url = {https://CRAN.R-project.org/package=sjstats},
     }
    @@ -136,8 +136,8 @@

    Latest development build

    To install the latest development snapshot (see latest changes below), type following commands into the R console:

    -
    library(devtools)
    -devtools::install_github("strengejacke/sjstats")
    +

    Please note the package dependencies when installing from GitHub. The GitHub version of this package may depend on latest GitHub versions of my other packages, so you may need to install those first, if you encounter any problems. Here’s the order for installing packages from GitHub:

    sjlabelledsjmiscsjstatsggeffectssjPlot

    @@ -146,7 +146,7 @@

    Officiale, stable release

    CRAN_Status_Badge    downloads    total

    To install the latest stable release from CRAN, type following command into the R console:

    -
    install.packages("sjstats")
    +
    diff --git a/docs/news/index.html b/docs/news/index.html index 4949e889..316aac08 100644 --- a/docs/news/index.html +++ b/docs/news/index.html @@ -58,7 +58,7 @@ sjstats - 0.14.3.9000 + 0.15.0
    @@ -121,7 +121,7 @@

    Changelog

    -sjstats 0.15.0 Unreleased +sjstats 0.15.0 2018-06-06

    @@ -163,6 +163,8 @@

    Bug fixes

    • Fixed issue in se() for icc()-objects, where random effect term could not be found.
    • +
    • Fixed issue in se() for merMod-objects.
    • +
    • Fixed issue in p_value() for mixed models with KR-approximation, which is now more accurate.
    @@ -191,7 +193,7 @@

    • tidy_stan() was improved especially for more complex multilevel models.
    • -
    • Make tidy_stan() for large brmsfit-objects (esp. with random effects) more efficient.
    • +
    • Make tidy_stan() for large brmsfit-objects (esp. with random effects) more efficient.
    • Better print()-method for tidy_stan(), hdi(), rope(), icc() and some other functions.
    • link_inverse() now also should return the link-inverse function for most (or some or all?) custom families of brms-models.
    • diff --git a/docs/pkgdown.css b/docs/pkgdown.css index 6ca2f37a..c5ab586e 100644 --- a/docs/pkgdown.css +++ b/docs/pkgdown.css @@ -225,8 +225,3 @@ mark { border-bottom: 2px solid rgba(255, 153, 51, 0.3); padding: 1px; } - -/* vertical spacing after htmlwidgets */ -.html-widget { - margin-bottom: 10px; -} diff --git a/docs/pkgdown.js b/docs/pkgdown.js index de9bd724..16d57509 100644 --- a/docs/pkgdown.js +++ b/docs/pkgdown.js @@ -1,110 +1,174 @@ -/* http://gregfranko.com/blog/jquery-best-practices/ */ -(function($) { - $(function() { - - $("#sidebar") - .stick_in_parent({offset_top: 40}) - .on('sticky_kit:bottom', function(e) { - $(this).parent().css('position', 'static'); - }) - .on('sticky_kit:unbottom', function(e) { - $(this).parent().css('position', 'relative'); - }); - - $('body').scrollspy({ - target: '#sidebar', - offset: 60 +$(function() { + + $("#sidebar") + .stick_in_parent({offset_top: 40}) + .on('sticky_kit:bottom', function(e) { + $(this).parent().css('position', 'static'); + }) + .on('sticky_kit:unbottom', function(e) { + $(this).parent().css('position', 'relative'); }); - $('[data-toggle="tooltip"]').tooltip(); - - var cur_path = paths(location.pathname); - var links = $("#navbar ul li a"); - var max_length = -1; - var pos = -1; - for (var i = 0; i < links.length; i++) { - if (links[i].getAttribute("href") === "#") - continue; - var path = paths(links[i].pathname); - - var length = prefix_length(cur_path, path); - if (length > max_length) { - max_length = length; - pos = i; - } - } + $('body').scrollspy({ + target: '#sidebar', + offset: 60 + }); + + $('[data-toggle="tooltip"]').tooltip(); + + var cur_path = paths(location.pathname); + $("#navbar ul li a").each(function(index, value) { + if (value.text == "Home") + return; + if (value.getAttribute("href") === "#") + return; - // Add class to parent
    • , and enclosing
    • if in dropdown - if (pos >= 0) { - var menu_anchor = $(links[pos]); + var path = paths(value.pathname); + if (is_prefix(cur_path, path)) { + // Add class to parent
    • , and enclosing
    • if in dropdown + var menu_anchor = $(value); menu_anchor.parent().addClass("active"); menu_anchor.closest("li.dropdown").addClass("active"); } }); +}); - function paths(pathname) { - var pieces = pathname.split("/"); - pieces.shift(); // always starts with / +$(document).ready(function() { + // do keyword highlighting + /* modified from https://jsfiddle.net/julmot/bL6bb5oo/ */ + var mark = function() { - var end = pieces[pieces.length - 1]; - if (end === "index.html" || end === "") - pieces.pop(); - return(pieces); - } + var referrer = document.URL ; + var paramKey = "q" ; - function prefix_length(needle, haystack) { - if (needle.length > haystack.length) - return(0); + if (referrer.indexOf("?") !== -1) { + var qs = referrer.substr(referrer.indexOf('?') + 1); + var qs_noanchor = qs.split('#')[0]; + var qsa = qs_noanchor.split('&'); + var keyword = ""; - // Special case for length-0 haystack, since for loop won't run - if (haystack.length === 0) { - return(needle.length === 0 ? 1 : 0); - } + for (var i = 0; i < qsa.length; i++) { + var currentParam = qsa[i].split('='); - for (var i = 0; i < haystack.length; i++) { - if (needle[i] != haystack[i]) - return(i); + if (currentParam.length !== 2) { + continue; + } + + if (currentParam[0] == paramKey) { + keyword = decodeURIComponent(currentParam[1].replace(/\+/g, "%20")); + } + } + + if (keyword !== "") { + $(".contents").unmark({ + done: function() { + $(".contents").mark(keyword); + } + }); + } } + }; - return(haystack.length); - } + mark(); +}); + +function paths(pathname) { + var pieces = pathname.split("/"); + pieces.shift(); // always starts with / - /* Clipboard --------------------------*/ + var end = pieces[pieces.length - 1]; + if (end === "index.html" || end === "") + pieces.pop(); + return(pieces); +} - function changeTooltipMessage(element, msg) { - var tooltipOriginalTitle=element.getAttribute('data-original-title'); - element.setAttribute('data-original-title', msg); - $(element).tooltip('show'); - 
element.setAttribute('data-original-title', tooltipOriginalTitle); +function is_prefix(needle, haystack) { + if (needle.length > haystack.lengh) + return(false); + + // Special case for length-0 haystack, since for loop won't run + if (haystack.length === 0) { + return(needle.length === 0); + } + + for (var i = 0; i < haystack.length; i++) { + if (needle[i] != haystack[i]) + return(false); } - if(Clipboard.isSupported()) { - $(document).ready(function() { - var copyButton = ""; + return(true); +} - $(".examples, div.sourceCode").addClass("hasCopyButton"); +/* Clipboard --------------------------*/ - // Insert copy buttons: - $(copyButton).prependTo(".hasCopyButton"); +function changeTooltipMessage(element, msg) { + var tooltipOriginalTitle=element.getAttribute('data-original-title'); + element.setAttribute('data-original-title', msg); + $(element).tooltip('show'); + element.setAttribute('data-original-title', tooltipOriginalTitle); +} - // Initialize tooltips: - $('.btn-copy-ex').tooltip({container: 'body'}); +if(Clipboard.isSupported()) { + $(document).ready(function() { + var copyButton = ""; - // Initialize clipboard: - var clipboardBtnCopies = new Clipboard('[data-clipboard-copy]', { - text: function(trigger) { - return trigger.parentNode.textContent; - } - }); + $(".examples").addClass("hasCopyButton"); - clipboardBtnCopies.on('success', function(e) { - changeTooltipMessage(e.trigger, 'Copied!'); - e.clearSelection(); - }); + // Insert copy buttons: + $(copyButton).prependTo(".hasCopyButton"); - clipboardBtnCopies.on('error', function() { - changeTooltipMessage(e.trigger,'Press Ctrl+C or Command+C to copy'); - }); + // Initialize tooltips: + $('.btn-copy-ex').tooltip({container: 'body'}); + + // Initialize clipboard: + var clipboardBtnCopies = new Clipboard('[data-clipboard-copy]', { + text: function(trigger) { + return trigger.parentNode.textContent; + } }); + + clipboardBtnCopies.on('success', function(e) { + changeTooltipMessage(e.trigger, 'Copied!'); + 
e.clearSelection(); + }); + + clipboardBtnCopies.on('error', function() { + changeTooltipMessage(e.trigger,'Press Ctrl+C or Command+C to copy'); + }); + }); +} + +/* Search term highlighting ------------------------------*/ + +function matchedWords(hit) { + var words = []; + + var hierarchy = hit._highlightResult.hierarchy; + // loop to fetch from lvl0, lvl1, etc. + for (var idx in hierarchy) { + words = words.concat(hierarchy[idx].matchedWords); + } + + var content = hit._highlightResult.content; + if (content) { + words = words.concat(content.matchedWords); } -})(window.jQuery || window.$) + + // return unique words + var words_uniq = [...new Set(words)]; + return words_uniq; +} + +function updateHitURL(hit) { + + var words = matchedWords(hit); + var url = ""; + + if (hit.anchor) { + url = hit.url_without_anchor + '?q=' + escape(words.join(" ")) + '#' + hit.anchor; + } else { + url = hit.url + '?q=' + escape(words.join(" ")); + } + + return url; +} diff --git a/docs/pkgdown.yml b/docs/pkgdown.yml index a78564fe..411b2222 100644 --- a/docs/pkgdown.yml +++ b/docs/pkgdown.yml @@ -1,5 +1,5 @@ -pandoc: 1.19.2.1 -pkgdown: 1.1.0 +pandoc: 2.1.1 +pkgdown: 1.0.0 pkgdown_sha: ~ articles: anova-statistics: anova-statistics.html diff --git a/docs/reference/boot_ci.html b/docs/reference/boot_ci.html index 6bbd1ba0..3766c1c4 100644 --- a/docs/reference/boot_ci.html +++ b/docs/reference/boot_ci.html @@ -21,11 +21,9 @@ - - - + @@ -40,7 +38,6 @@ - @@ -65,7 +62,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/bootstrap.html b/docs/reference/bootstrap.html index a23ae83d..38a73fab 100644 --- a/docs/reference/bootstrap.html +++ b/docs/reference/bootstrap.html @@ -21,11 +21,9 @@ - - - + @@ -39,7 +37,6 @@ - @@ -64,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -263,9 +260,9 @@

      Examp # or as tidyverse-approach library(dplyr)
      #> -#> Attaching package: 'dplyr'
      #> The following objects are masked from 'package:stats': +#> Attaching package: ‘dplyr’
      #> The following objects are masked from ‘package:stats’: #> -#> filter, lag
      #> The following objects are masked from 'package:base': +#> filter, lag
      #> The following objects are masked from ‘package:base’: #> #> intersect, setdiff, setequal, union
      library(purrr) bs <- efc %>% diff --git a/docs/reference/check_assumptions.html b/docs/reference/check_assumptions.html index 8c074a32..26890ff2 100644 --- a/docs/reference/check_assumptions.html +++ b/docs/reference/check_assumptions.html @@ -21,11 +21,9 @@ - - - + @@ -45,7 +43,6 @@ - @@ -70,7 +67,7 @@ sjstats - 0.14.3.9000 + 0.15.0
      @@ -227,7 +224,8 @@

      Details < 0.05 indicates a significant deviation from normal distribution. Note that this formal test almost always yields significant results for the distribution of residuals and visual inspection (e.g. qqplots) - are preferable (see sjp.lm with type = "ma"). + are preferable (see plot_model with + type = "diag").

      multicollin() wraps vif and returns the logical result as tibble. TRUE, if multicollinearity @@ -247,7 +245,7 @@

      Note

      These formal tests are very strict and in most cases violation of model assumptions are alerted, though the model is actually ok. It is preferable to check model assumptions based on visual inspection - (see sjp.lm with type = "ma").

      + (see plot_model with type = "diag").

      Examples

      diff --git a/docs/reference/chisq_gof.html b/docs/reference/chisq_gof.html index 73552a2c..67d76ecd 100644 --- a/docs/reference/chisq_gof.html +++ b/docs/reference/chisq_gof.html @@ -21,11 +21,9 @@ - - - + @@ -40,7 +38,6 @@ - @@ -65,7 +62,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/cod.html b/docs/reference/cod.html index 07f3f675..20d3eca8 100644 --- a/docs/reference/cod.html +++ b/docs/reference/cod.html @@ -21,11 +21,9 @@ - - - + @@ -41,7 +39,6 @@ - @@ -66,7 +63,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -144,7 +141,7 @@

      Arg x -

      Fitted glm or glmer model.

      +

      Fitted glm or glmer model.

      @@ -172,7 +169,7 @@

      See a

      Examples

      library(sjmisc)
      #> -#> Attaching package: 'sjmisc'
      #> The following object is masked from 'package:purrr': +#> Attaching package: ‘sjmisc’
      #> The following object is masked from ‘package:purrr’: #> #> is_empty
      data(efc) diff --git a/docs/reference/converge_ok.html b/docs/reference/converge_ok.html index 7f8cf8ca..9068e888 100644 --- a/docs/reference/converge_ok.html +++ b/docs/reference/converge_ok.html @@ -21,11 +21,9 @@ - - - + @@ -42,7 +40,6 @@ - @@ -67,7 +64,7 @@ sjstats - 0.14.3.9000 + 0.15.0
      diff --git a/docs/reference/cv.html b/docs/reference/cv.html index cbd02164..71e58eb3 100644 --- a/docs/reference/cv.html +++ b/docs/reference/cv.html @@ -21,11 +21,9 @@ - - - + @@ -41,7 +39,6 @@ - @@ -66,7 +63,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -192,9 +189,9 @@

      Examp fit <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) cv(fit)
      #> [1] 0.07851737
      library(nlme)
      #> -#> Attaching package: 'nlme'
      #> The following object is masked from 'package:lme4': +#> Attaching package: ‘nlme’
      #> The following object is masked from ‘package:lme4’: #> -#> lmList
      #> The following object is masked from 'package:dplyr': +#> lmList
      #> The following object is masked from ‘package:dplyr’: #> #> collapse
      fit <- lme(Reaction ~ Days, random = ~ Days | Subject, data = sleepstudy) cv(fit)
      #> [1] 0.07851744
      diff --git a/docs/reference/cv_error.html b/docs/reference/cv_error.html index a3301697..cda6c0ec 100644 --- a/docs/reference/cv_error.html +++ b/docs/reference/cv_error.html @@ -21,11 +21,9 @@ - - - + @@ -41,7 +39,6 @@ - @@ -66,7 +63,7 @@ sjstats - 0.14.3.9000 + 0.15.0
      diff --git a/docs/reference/deff.html b/docs/reference/deff.html index 5ba3c46e..e5fd79b6 100644 --- a/docs/reference/deff.html +++ b/docs/reference/deff.html @@ -61,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/efc.html b/docs/reference/efc.html index d79981e9..b2659a22 100644 --- a/docs/reference/efc.html +++ b/docs/reference/efc.html @@ -60,7 +60,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/eta_sq.html b/docs/reference/eta_sq.html index 1d9e5cbc..dd31510b 100644 --- a/docs/reference/eta_sq.html +++ b/docs/reference/eta_sq.html @@ -62,7 +62,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -189,7 +189,9 @@

      Details

      References

      -

      Levine TR, Hullett CR (2002): Eta Squared, Partial Eta Squared, and Misreporting of Effect Size in Communication Research (pdf)

      +

      Levine TR, Hullett CR (2002): Eta Squared, Partial Eta Squared, and Misreporting of Effect Size in Communication Research (pdf) +

      + Tippey K, Longnecker MT (2016): An Ad Hoc Method for Computing Pseudo-Effect Size for Mixed Model. (pdf)

      Examples

      diff --git a/docs/reference/find_beta.html b/docs/reference/find_beta.html index 3c827896..b879f9e8 100644 --- a/docs/reference/find_beta.html +++ b/docs/reference/find_beta.html @@ -65,7 +65,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/gmd.html b/docs/reference/gmd.html index 56bccda9..e09d7ff3 100644 --- a/docs/reference/gmd.html +++ b/docs/reference/gmd.html @@ -61,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/grpmean.html b/docs/reference/grpmean.html index 76a6e7d9..3baefd32 100644 --- a/docs/reference/grpmean.html +++ b/docs/reference/grpmean.html @@ -61,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -166,7 +166,9 @@

      Arg out

      Character vector, indicating whether the results should be printed to console (out = "txt") or as HTML-table in the viewer-pane -(out = "viewer") or browser (out = "browser").

      +(out = "viewer") or browser (out = "browser"), of if the +results should be plotted (out = "plot", only applies to certain +functions). May be abbreviated.

      @@ -266,13 +268,13 @@

      Examp #> # Grouped Means for average number of hours of care per week by elder's dependency #> #> term mean N std.dev std.error p.value -#> 1 independent 10.35 66 8.01 0.99 <0.001 -#> 2 slightly dependent 17.73 225 17.74 1.18 <0.001 -#> 3 moderately dependent 33.82 306 41.54 2.37 0.85 -#> 4 severely dependent 75.32 304 61.72 3.54 <0.001 -#> 5 Total 42.17 901 50.82 1.69 +#> 1 independent 9.58 66 8.01 0.99 <0.001 +#> 2 slightly dependent 16.94 225 17.74 1.18 <0.001 +#> 3 moderately dependent 35.20 306 41.54 2.37 0.62 +#> 4 severely dependent 74.11 304 61.72 3.54 <0.001 +#> 5 Total 41.87 901 50.82 1.69 #> -#> Anova: R2=0.247; adj.R2=0.245; F=98.117; p=0.000
      +#> Anova: R2=0.233; adj.R2=0.231; F=91.047; p=0.000

      @@ -148,11 +148,11 @@

      Compute statistics for MCMC samples and Stan models

      hdi(x, prob = 0.9, trans = NULL, type = c("fixed", "random", "all"))
       
      -equi_test(x, rope, eff_size, plot = FALSE, ...)
      +equi_test(x, rope, eff_size, out = c("txt", "viewer", "browser", "plot"), ...)
       
       mcse(x, type = c("fixed", "random", "all"))
       
      -mediation(x, treatment, mediator, prob = 0.9, ...)
      +mediation(x, treatment, mediator, prob = 0.9, typical = "median", ...)
       
       n_eff(x, type = c("fixed", "random", "all"))
       
      @@ -204,9 +204,12 @@ 

      Arg argument will be ignored.

      - plot -

      Logical, if TRUE, equi_test() returns a plot -instead of a data frame.

      + out +

      Character vector, indicating whether the results should be printed +to console (out = "txt") or as HTML-table in the viewer-pane +(out = "viewer") or browser (out = "browser"), of if the +results should be plotted (out = "plot", only applies to certain +functions). May be abbreviated.

      ... @@ -237,6 +240,12 @@

      Arg

      Character, name of the mediator variable in a (multivariate response) mediator-model. If missing, mediation() tries to find the treatment variable automatically, however, this may fail.

      + + + typical +

      The typical value that will represent the Bayesian point estimate. +By default, the posterior median is returned. See typical_value +for possible values for this argument.

      @@ -396,7 +405,7 @@

      Examp # Test for Practical Equivalence equi_test(fit) - equi_test(fit, plot = TRUE) + equi_test(fit, out = "plot") } # }

      diff --git a/docs/reference/hoslem_gof.html b/docs/reference/hoslem_gof.html index b5f54de0..8af108e1 100644 --- a/docs/reference/hoslem_gof.html +++ b/docs/reference/hoslem_gof.html @@ -61,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/icc.html b/docs/reference/icc.html index 7dada409..6c64fa92 100644 --- a/docs/reference/icc.html +++ b/docs/reference/icc.html @@ -64,7 +64,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -292,8 +292,8 @@

      Examp #> Family: gaussian (identity) #> Formula: Reaction ~ Days + (1 | mygrp) + (1 | Subject) #> -#> ICC (mygrp): 0.002282 -#> ICC (Subject): 0.588989
      +#> ICC (mygrp): 0.006013 +#> ICC (Subject): 0.589883
      # return icc for all models at once icc(fit0, fit1, fit2)
      #> Caution! ICC for random-slope-intercept models usually not meaningful. See 'Note' in `?icc`.
      #> [[1]] #> @@ -317,8 +317,8 @@

      Examp #> Family: gaussian (identity) #> Formula: Reaction ~ Days + (1 | mygrp) + (1 | Subject) #> -#> ICC (mygrp): 0.002282 -#> ICC (Subject): 0.588989 +#> ICC (mygrp): 0.006013 +#> ICC (Subject): 0.589883 #>

      icc1 <- icc(fit1)
      #> Caution! ICC for random-slope-intercept models usually not meaningful. See 'Note' in `?icc`.
      icc2 <- icc(fit2) @@ -336,9 +336,9 @@

      Examp #> Family: gaussian (identity) #> Formula: Reaction ~ Days + (1 | mygrp) + (1 | Subject) #> -#> Within-group-variance: 955.404 -#> Between-group-variance: 5.333 (mygrp) -#> Between-group-variance: 1376.758 (Subject)

      +#> Within-group-variance: 946.474 +#> Between-group-variance: 14.082 (mygrp) +#> Between-group-variance: 1381.596 (Subject)
      # NOT RUN { # compute ICC for Bayesian mixed model, with an ICC for each # sample of the posterior. The print()-method then shows diff --git a/docs/reference/index.html b/docs/reference/index.html index aa1a3432..6ff69441 100644 --- a/docs/reference/index.html +++ b/docs/reference/index.html @@ -21,11 +21,9 @@ - - - + @@ -36,7 +34,6 @@ - @@ -61,7 +58,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -124,7 +121,6 @@

      Reference

      - @@ -137,289 +133,289 @@

      bootstrap()

      - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + diff --git a/docs/reference/inequ_trend.html b/docs/reference/inequ_trend.html index f8779713..57bd203c 100644 --- a/docs/reference/inequ_trend.html +++ b/docs/reference/inequ_trend.html @@ -63,7 +63,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/is_prime.html b/docs/reference/is_prime.html index 081d9c7d..e37299ae 100644 --- a/docs/reference/is_prime.html +++ b/docs/reference/is_prime.html @@ -61,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/mean_n.html b/docs/reference/mean_n.html index 2b7d9717..3c6eec1e 100644 --- a/docs/reference/mean_n.html +++ b/docs/reference/mean_n.html @@ -62,7 +62,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/mn.html b/docs/reference/mn.html deleted file mode 100644 index 355b4ef8..00000000 --- a/docs/reference/mn.html +++ /dev/null @@ -1,194 +0,0 @@ - - - - - - - - -Sum, mean and median for vectors — mn • sjstats - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      -
      - - - -
      - -
      -
      - - -
      - -

      mn(), md() and sm() calculate the mean, - median or sum of values in a vector, but have set argument na.rm - to TRUE by default.

      - -
      - -
      mn(x, na.rm = TRUE)
      -
      -sm(x, na.rm = TRUE)
      -
      -md(x, na.rm = TRUE)
      - -

      Arguments

      -

      Generate nonparametric bootstrap replications

      boot_ci() boot_se() boot_p() boot_est()

      Standard error and confidence intervals for bootstrapped estimates

      check_assumptions() outliers() heteroskedastic() autocorrelation() normality() multicollin()

      Check model assumptions

      chisq_gof()

      Chi-square goodness-of-fit-test

      cod()

      Tjur's Coefficient of Discrimination

      converge_ok() is_singular()

      Convergence test for mixed effects models

      cv()

      Coefficient of Variation

      cv_error() cv_compare()

      Test and training error from model cross-validation

      deff()

      Design effects for two-level mixed models

      efc

      Sample dataset from the EUROFAMCARE project

      eta_sq() omega_sq() cohens_f() anova_stats()

      Effect size statistics for anova

      find_beta() find_beta2() find_cauchy() find_normal()

      Determining distribution parameters

      gmd()

      Gini's Mean Difference

      grpmean()

      Summary of mean values by group

      hdi() equi_test() mcse() mediation() n_eff() rope()

      Compute statistics for MCMC samples and Stan models

      hoslem_gof()

      Hosmer-Lemeshow Goodness-of-fit-test

      icc()

      Intraclass-Correlation Coefficient

      inequ_trend()

      Compute trends in status inequalities

      is_prime()

      Find prime numbers

      mean_n()

      Row means with min amount of valid values

      mwu()

      Mann-Whitney-U-Test

      nhanes_sample

      Sample dataset from the National Health and Nutrition Examination Survey

      odds_to_rr() or_to_rr()

      Get relative risks estimates from logistic regressions or odds ratio values

      overdisp() zero_count()

      Check overdispersion of GL(M)M's

      pca() pca_rotate()

      Tidy summary of Principal Component Analysis

      pred_accuracy()

      Accuracy of predictions from model fit

      pred_vars() resp_var() resp_val() link_inverse() model_frame() model_family() var_names()

      Access information from model objects

      prop() props()

      Proportions of values in a vector

      p_value()

      Get p-values from regression model objects

      r2()

      Compute r-squared of (generalized) linear (mixed) models

      reliab_test() split_half() cronb() mic()

      Check internal consistency of a test or questionnaire

      re_var() get_re_var()

      Random effect variances

      rmse() rse() mse()

      Compute model quality

      robust() svy()

      Robust standard errors for regression models

      scale_weights()

      Rescale design weights for multilevel analysis

      se()

      Standard Error for variables or coefficients

      se_ybar()

      Standard error of sample mean for mixed models

      sjstats-package

      Collection of Convenient Functions for Common Statistical Computations

      smpsize_lmm()

      Sample size for linear mixed models

      std_beta()

      Standardized beta coefficients and CI of linear and mixed models

      svyglm.nb()

      Survey-weighted negative binomial generalised linear model

      table_values()

      Expected and relative table values

      tidy_stan()

      Tidy summary output for stan models

      typical_value()

      Return the typical value of a vector

      var_pop() sd_pop()

      Calculate population variance and standard deviation

      weight() weight2()

      Weight a variable

      wtd_sd() wtd_se() svy_md()

      Weighted statistics for variables

      phi() cramer() xtab_statistics()

      - - - - - - - - - -
      x

      A vector.

      na.rm

      Logical, whether to remove NA values from x before computing -mean, median or sum.

      - -

      Value

      - -

      The mean, median or sum of x.

      - - -

      Examples

      -
      data(efc) -md(efc$neg_c_7)
      #> [1] 11
      mn(efc$neg_c_7)
      #> [1] 11.84978
      mean(efc$neg_c_7)
      #> [1] NA
      sm(efc$neg_c_7 > 15)
      #> [1] 148
      -
      - - - - -
      - - -
      -

      Site built with pkgdown.

      -
      - -
      - - - - - - - diff --git a/docs/reference/mwu.html b/docs/reference/mwu.html index 16f695a6..604d5303 100644 --- a/docs/reference/mwu.html +++ b/docs/reference/mwu.html @@ -66,7 +66,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -131,7 +131,7 @@

      Mann-Whitney-U-Test

      This function performs a Mann-Whitney-U-Test (or Wilcoxon rank sum test, - see wilcox.test and wilcox_test) + see wilcox.test and wilcox_test) for x, for each group indicated by grp. If grp has more than two categories, a comparison between each combination of two groups is performed.

      @@ -177,7 +177,9 @@

      Arg out

      Character vector, indicating whether the results should be printed to console (out = "txt") or as HTML-table in the viewer-pane -(out = "viewer") or browser (out = "browser").

+(out = "viewer") or browser (out = "browser"), or if the +results should be plotted (out = "plot", only applies to certain +functions). May be abbreviated.

      diff --git a/docs/reference/nhanes_sample.html b/docs/reference/nhanes_sample.html index 07cee35c..0da8a885 100644 --- a/docs/reference/nhanes_sample.html +++ b/docs/reference/nhanes_sample.html @@ -62,7 +62,7 @@ sjstats - 0.14.3.9000 + 0.15.0

      diff --git a/docs/reference/odds_to_rr.html b/docs/reference/odds_to_rr.html index ca3639aa..9bfb2728 100644 --- a/docs/reference/odds_to_rr.html +++ b/docs/reference/odds_to_rr.html @@ -62,7 +62,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/overdisp.html b/docs/reference/overdisp.html index 2eb46a39..06c433ac 100644 --- a/docs/reference/overdisp.html +++ b/docs/reference/overdisp.html @@ -63,7 +63,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/p_value.html b/docs/reference/p_value.html index a09ac8df..4eed86a9 100644 --- a/docs/reference/p_value.html +++ b/docs/reference/p_value.html @@ -60,7 +60,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -149,7 +149,7 @@

      Arg

      Value

      -

      A tibble with the model coefficients' names (term), +

      A data.frame with the model coefficients' names (term), p-values (p.value) and standard errors (std.error).

      Details

      @@ -163,37 +163,42 @@

      Details treating the t-statistics as Wald z-statistics.

      If p-values already have been computed (e.g. for merModLmerTest-objects - from the lmerTest-package), these will be returned.

      + from the lmerTest-package), these will be returned. +

      + The print()-method has a summary-argument, that - in + case p.kr = TRUE - also prints information on the approximated + degrees of freedom (see 'Examples').

      Examples

      data(efc) # linear model fit fit <- lm(neg_c_7 ~ e42dep + c172code, data = efc) -p_value(fit)
      #> # A tibble: 3 x 3 -#> term p.value std.error -#> <chr> <dbl> <dbl> -#> 1 (Intercept) 2.13e-30 0.566 -#> 2 e42dep 4.91e-30 0.133 -#> 3 c172code 2.07e- 1 0.198
      +p_value(fit)
      #> term p.value std.error +#> 1 (Intercept) 0.000 0.566 +#> 2 e42dep 0.000 0.133 +#> 3 c172code 0.207 0.198
      # Generalized Least Squares fit library(nlme) fit <- gls(follicles ~ sin(2*pi*Time) + cos(2*pi*Time), Ovary, correlation = corAR1(form = ~ 1 | Mare)) -p_value(fit)
      #> # A tibble: 3 x 3 -#> term p.value std.error -#> <chr> <dbl> <dbl> -#> 1 (Intercept) 2.62e-51 0.665 -#> 2 sin(2 * pi * Time) 2.29e- 5 0.645 -#> 3 cos(2 * pi * Time) 1.98e- 1 0.698
      +p_value(fit)
      #> term p.value std.error +#> 1 (Intercept) 0.000 0.665 +#> 2 sin(2 * pi * Time) 0.000 0.645 +#> 3 cos(2 * pi * Time) 0.198 0.698
      # lme4-fit library(lme4) -fit <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) -p_value(fit, p.kr = TRUE)
      #> Computing p-values via Kenward-Roger approximation. Use `p.kr = FALSE` if computation takes too long.
      #> # A tibble: 2 x 3 -#> term p.value std.error -#> <chr> <dbl> <dbl> -#> 1 (Intercept) 1.17e-17 6.82 -#> 2 Days 3.26e- 6 1.55
      +sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE) +fit <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy) +pv <- p_value(fit, p.kr = TRUE)
      #> Computing p-values via Kenward-Roger approximation. Use `p.kr = FALSE` if computation takes too long.
      +# normal output +pv
      #> term p.value std.error +#> 1 (Intercept) 0 9.790 +#> 2 Days 0 0.815
      +# add information on df and t-statistic +print(pv, summary = TRUE)
      #> term p.value std.error df statistic +#> 1 (Intercept) 0 9.790 22.938 25.670 +#> 2 Days 0 0.815 160.967 12.868
      diff --git a/docs/reference/pred_accuracy.html b/docs/reference/pred_accuracy.html index afce767f..77460095 100644 --- a/docs/reference/pred_accuracy.html +++ b/docs/reference/pred_accuracy.html @@ -61,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -192,15 +192,15 @@

      Examp pred_accuracy(efc, fit)
      #> #> # Accuracy of Model Predictions #> -#> Accuracy: 41.17% -#> SE: 4.22%-points +#> Accuracy: 41.12% +#> SE: 6.56%-points #> Method: Correlation between observed and predicted
      # accuracy for linear model, with bootstrapping pred_accuracy(efc, fit, method = "boot", n = 100)
      #> #> # Accuracy of Model Predictions #> #> Accuracy: 41.30% -#> SE: 2.76%-points +#> SE: 2.78%-points #> Method: Correlation between observed and predicted
      # accuracy for logistic regression, with crossvalidation efc$services <- sjmisc::dicho(efc$tot_sc_e, dich.by = 0, as.num = TRUE) @@ -209,8 +209,8 @@

      Examp pred_accuracy(efc, fit)

      #> #> # Accuracy of Model Predictions #> -#> Accuracy: 58.64% -#> SE: 2.21%-points +#> Accuracy: 58.38% +#> SE: 3.92%-points #> Method: Area under Curve
      diff --git a/docs/reference/pred_vars.html b/docs/reference/pred_vars.html index 27b1e7c2..52fe5a15 100644 --- a/docs/reference/pred_vars.html +++ b/docs/reference/pred_vars.html @@ -62,7 +62,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/prop.html b/docs/reference/prop.html index 1d0624d5..7d605039 100644 --- a/docs/reference/prop.html +++ b/docs/reference/prop.html @@ -64,7 +64,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/r2.html b/docs/reference/r2.html index 5f825467..af74b77f 100644 --- a/docs/reference/r2.html +++ b/docs/reference/r2.html @@ -63,7 +63,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/re_var.html b/docs/reference/re_var.html index a63a7d00..b3f6c838 100644 --- a/docs/reference/re_var.html +++ b/docs/reference/re_var.html @@ -64,7 +64,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -203,12 +203,12 @@

      Examp #> 9.604334
      sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE) fit2 <- lmer(Reaction ~ Days + (1 | mygrp) + (Days | Subject), sleepstudy) -re_var(fit2)
      #> Within-group-variance: 654.941 -#> Between-group-variance: 0.000 (mygrp) -#> Between-group-variance: 612.090 (Subject) -#> Random-slope-variance: 35.072 (Subject.Days) -#> Slope-Intercept-covariance: 9.604 (Subject) -#> Slope-Intercept-correlation: 0.066 (Subject)
      +re_var(fit2)
      #> Within-group-variance: 605.877 +#> Between-group-variance: 44.955 (mygrp) +#> Between-group-variance: 615.539 (Subject) +#> Random-slope-variance: 38.305 (Subject.Days) +#> Slope-Intercept-covariance: 1.074 (Subject) +#> Slope-Intercept-correlation: 0.007 (Subject)
      diff --git a/docs/reference/reliab_test.html b/docs/reference/reliab_test.html index 8f8aaa14..21d354fe 100644 --- a/docs/reference/reliab_test.html +++ b/docs/reference/reliab_test.html @@ -61,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -161,7 +161,9 @@

      Arg out

      Character vector, indicating whether the results should be printed to console (out = "txt") or as HTML-table in the viewer-pane -(out = "viewer") or browser (out = "browser").

+(out = "viewer") or browser (out = "browser"), or if the +results should be plotted (out = "plot", only applies to certain +functions). May be abbreviated.

      cor.method diff --git a/docs/reference/rmse.html b/docs/reference/rmse.html index 5918457d..81fdca07 100644 --- a/docs/reference/rmse.html +++ b/docs/reference/rmse.html @@ -61,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -141,8 +141,8 @@

      Arg fit -

      Fitted linear model of class lm, -merMod (lme4) or lme (nlme).

      +

      Fitted linear model of class lm, merMod (lme4) +or lme (nlme).

      normalized diff --git a/docs/reference/robust.html b/docs/reference/robust.html index 49942a6e..7846a95c 100644 --- a/docs/reference/robust.html +++ b/docs/reference/robust.html @@ -71,7 +71,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/scale_weights.html b/docs/reference/scale_weights.html index 7783d4b7..34f9d2f2 100644 --- a/docs/reference/scale_weights.html +++ b/docs/reference/scale_weights.html @@ -66,7 +66,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/se.html b/docs/reference/se.html index 247ce09f..4f586069 100644 --- a/docs/reference/se.html +++ b/docs/reference/se.html @@ -64,7 +64,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -219,7 +219,7 @@

      R

      Examples

      # compute standard error for vector -se(rnorm(n = 100, mean = 3))
      #> [1] 0.1049285
      +se(rnorm(n = 100, mean = 3))
      #> [1] 0.1074613
      # compute standard error for each variable in a data frame data(efc) se(efc[, 1:3])
      #> c12hour e15relat e16sex @@ -228,8 +228,25 @@

      Examp library(lme4) fit <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) se(fit)

      #> $Subject -#> intercept_se slope_se -#> 13.86649 2.77520 +#> (Intercept) Days +#> 1 13.86649 2.7752 +#> 2 13.86649 2.7752 +#> 3 13.86649 2.7752 +#> 4 13.86649 2.7752 +#> 5 13.86649 2.7752 +#> 6 13.86649 2.7752 +#> 7 13.86649 2.7752 +#> 8 13.86649 2.7752 +#> 9 13.86649 2.7752 +#> 10 13.86649 2.7752 +#> 11 13.86649 2.7752 +#> 12 13.86649 2.7752 +#> 13 13.86649 2.7752 +#> 14 13.86649 2.7752 +#> 15 13.86649 2.7752 +#> 16 13.86649 2.7752 +#> 17 13.86649 2.7752 +#> 18 13.86649 2.7752 #>
      # compute odds-ratio adjusted standard errors, based on delta method # with first-order Taylor approximation. diff --git a/docs/reference/se_ybar.html b/docs/reference/se_ybar.html index f8b7b1d9..1de9d193 100644 --- a/docs/reference/se_ybar.html +++ b/docs/reference/se_ybar.html @@ -63,7 +63,7 @@ sjstats - 0.14.3.9000 + 0.15.0
      diff --git a/docs/reference/sjstats-package.html b/docs/reference/sjstats-package.html index 2f802432..135e81ce 100644 --- a/docs/reference/sjstats-package.html +++ b/docs/reference/sjstats-package.html @@ -72,7 +72,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/smpsize_lmm.html b/docs/reference/smpsize_lmm.html index 5988275f..f75710dc 100644 --- a/docs/reference/smpsize_lmm.html +++ b/docs/reference/smpsize_lmm.html @@ -62,7 +62,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/std_beta.html b/docs/reference/std_beta.html index d2bcdea2..e1998a5c 100644 --- a/docs/reference/std_beta.html +++ b/docs/reference/std_beta.html @@ -61,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -137,8 +137,8 @@

      Arg fit -

      Fitted linear (mixed) model of class lm or -merMod (lme4 package).

      +

      Fitted linear (mixed) model of class lm or merMod +(lme4 package).

      type @@ -179,7 +179,7 @@

      Note



      and

      head(model.matrix(nlme::gls(neg_c_7 ~ as.factor(e42dep), data = efc, na.action = na.omit))).

      - In such cases, use to_dummy to create dummies from + In such cases, use to_dummy to create dummies from factors.

      References

      diff --git a/docs/reference/svyglm.nb.html b/docs/reference/svyglm.nb.html index 599d93d3..a70bcc57 100644 --- a/docs/reference/svyglm.nb.html +++ b/docs/reference/svyglm.nb.html @@ -65,7 +65,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/table_values.html b/docs/reference/table_values.html index 9d42aed4..67a35edd 100644 --- a/docs/reference/table_values.html +++ b/docs/reference/table_values.html @@ -61,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -137,9 +137,10 @@

      Arg tab -

      Simple table or ftable of which cell, row and column percentages -as well as expected values are calculated. Tables of class xtabs and other will -be coerced to ftable objects.

      +

      Simple table or ftable of which +cell, row and column percentages as well as expected values are calculated. +Tables of class xtabs and other will be coerced to +ftable objects.

      digits @@ -162,12 +163,12 @@

      Examp
      tab <- table(sample(1:2, 30, TRUE), sample(1:3, 30, TRUE)) # show expected values table_values(tab)$expected
      #> A B C -#> A 5 3 7 -#> B 5 3 7
      # show cell percentages +#> A 3 2 3 +#> B 9 6 7
      # show cell percentages table_values(tab)$cell
      #> 1 2 3 #> -#> 1 16.67 10.00 26.67 -#> 2 16.67 10.00 20.00
      +#> 1 6.67 6.67 13.33 +#> 2 33.33 20.00 20.00
      diff --git a/docs/reference/typical_value.html b/docs/reference/typical_value.html index 71f6fb25..7c72b0ac 100644 --- a/docs/reference/typical_value.html +++ b/docs/reference/typical_value.html @@ -60,7 +60,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/var_pop.html b/docs/reference/var_pop.html index 1b71ec89..ab1cd7da 100644 --- a/docs/reference/var_pop.html +++ b/docs/reference/var_pop.html @@ -60,7 +60,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/weight.html b/docs/reference/weight.html index 31c56916..56e9cf33 100644 --- a/docs/reference/weight.html +++ b/docs/reference/weight.html @@ -61,7 +61,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -163,7 +163,7 @@

      Details

      weight2() sums up all weights values of the associated categories of x, whereas weight() uses a - xtabs formula to weight cases. Thus, weight() + xtabs formula to weight cases. Thus, weight() may return a vector of different length than x.

      Note

      diff --git a/docs/reference/wtd_sd.html b/docs/reference/wtd_sd.html index b68dad25..a79a0fed 100644 --- a/docs/reference/wtd_sd.html +++ b/docs/reference/wtd_sd.html @@ -63,7 +63,7 @@ sjstats - 0.14.3.9000 + 0.15.0 diff --git a/docs/reference/xtab_statistics.html b/docs/reference/xtab_statistics.html index 610bed08..c5533c3a 100644 --- a/docs/reference/xtab_statistics.html +++ b/docs/reference/xtab_statistics.html @@ -63,7 +63,7 @@ sjstats - 0.14.3.9000 + 0.15.0 @@ -146,8 +146,8 @@

      Arg tab -

      A table or ftable. Tables of class -xtabs and other will be coerced to ftable +

      A table or ftable. Tables of class +xtabs and other will be coerced to ftable objects.

      diff --git a/inst/doc/anova-statistics.html b/inst/doc/anova-statistics.html index cee9cb26..9bd3bcb7 100644 --- a/inst/doc/anova-statistics.html +++ b/inst/doc/anova-statistics.html @@ -12,7 +12,7 @@ - + Statistics for Anova Tables @@ -70,7 +70,7 @@

      Statistics for Anova Tables

      Daniel Lüdecke

      -

      2018-06-05

      +

      2018-06-06

      diff --git a/inst/doc/bayesian-statistics.html b/inst/doc/bayesian-statistics.html index 11b093f2..4cfbfdd1 100644 --- a/inst/doc/bayesian-statistics.html +++ b/inst/doc/bayesian-statistics.html @@ -12,7 +12,7 @@ - + Statistics for Bayesian Models @@ -70,7 +70,7 @@

      Statistics for Bayesian Models

      Daniel Lüdecke

      -

      2018-06-05

      +

      2018-06-06

      @@ -145,16 +145,16 @@

      Highest Density Interval

      #> # Highest Density Interval #> #> HDI(90%) -#> b_jobseek_Intercept [ 3.45 3.86] -#> b_depress2_Intercept [ 1.97 2.46] +#> b_jobseek_Intercept [ 3.47 3.88] +#> b_depress2_Intercept [ 1.94 2.43] #> b_jobseek_treat [-0.02 0.15] #> b_jobseek_econ_hard [ 0.01 0.09] -#> b_jobseek_sex [-0.08 0.07] +#> b_jobseek_sex [-0.09 0.07] #> b_jobseek_age [ 0.00 0.01] #> b_depress2_treat [-0.11 0.03] #> b_depress2_job_seek [-0.29 -0.19] #> b_depress2_econ_hard [ 0.11 0.18] -#> b_depress2_sex [ 0.04 0.17] +#> b_depress2_sex [ 0.04 0.18] #> b_depress2_age [-0.00 0.00] #> sigma_jobseek [ 0.70 0.76] #> sigma_depress2 [ 0.59 0.64] @@ -164,33 +164,33 @@

      Highest Density Interval

      #> # Highest Density Interval #> #> HDI(50%) HDI(89%) -#> b_jobseek_Intercept [ 3.59 3.76] [ 3.47 3.87] -#> b_depress2_Intercept [ 2.11 2.31] [ 1.98 2.45] +#> b_jobseek_Intercept [ 3.58 3.74] [ 3.48 3.88] +#> b_depress2_Intercept [ 2.11 2.30] [ 1.94 2.42] #> b_jobseek_treat [ 0.03 0.10] [-0.02 0.15] #> b_jobseek_econ_hard [ 0.04 0.07] [ 0.01 0.09] -#> b_jobseek_sex [-0.04 0.03] [-0.08 0.07] +#> b_jobseek_sex [-0.05 0.02] [-0.08 0.07] #> b_jobseek_age [ 0.00 0.01] [ 0.00 0.01] -#> b_depress2_treat [-0.07 -0.01] [-0.10 0.03] -#> b_depress2_job_seek [-0.26 -0.22] [-0.28 -0.19] -#> b_depress2_econ_hard [ 0.14 0.16] [ 0.11 0.18] -#> b_depress2_sex [ 0.08 0.14] [ 0.04 0.17] +#> b_depress2_treat [-0.07 -0.01] [-0.11 0.03] +#> b_depress2_job_seek [-0.26 -0.22] [-0.29 -0.20] +#> b_depress2_econ_hard [ 0.13 0.16] [ 0.12 0.18] +#> b_depress2_sex [ 0.08 0.13] [ 0.04 0.17] #> b_depress2_age [-0.00 0.00] [-0.00 0.00] -#> sigma_jobseek [ 0.71 0.74] [ 0.70 0.75] -#> sigma_depress2 [ 0.60 0.62] [ 0.59 0.64]
      +#> sigma_jobseek [ 0.71 0.74] [ 0.70 0.76] +#> sigma_depress2 [ 0.61 0.63] [ 0.59 0.64]

      For multilevel models, the type-argument defines whether the HDI of fixed, random or all effects are shown.

      hdi(m5, type = "random")
       #> 
       #> # Highest Density Interval
       #> 
       #>                              HDI(90%)
      -#>  r_e15relat.1.Intercept. [-0.15 1.23]
      -#>  r_e15relat.2.Intercept. [-0.16 1.01]
      -#>  r_e15relat.3.Intercept. [-0.82 0.79]
      -#>  r_e15relat.4.Intercept. [-0.56 0.76]
      -#>  r_e15relat.5.Intercept. [-0.96 0.71]
      -#>  r_e15relat.6.Intercept. [-1.69 0.21]
      -#>  r_e15relat.7.Intercept. [-1.15 0.88]
      -#>  r_e15relat.8.Intercept. [-0.85 0.49]
      +#> r_e15relat.1.Intercept. [-0.10 1.30] +#> r_e15relat.2.Intercept. [-0.11 1.01] +#> r_e15relat.3.Intercept. [-0.86 0.69] +#> r_e15relat.4.Intercept. [-0.52 0.74] +#> r_e15relat.5.Intercept. [-0.91 0.78] +#> r_e15relat.6.Intercept. [-1.63 0.23] +#> r_e15relat.7.Intercept. [-1.12 0.92] +#> r_e15relat.8.Intercept. [-0.84 0.46]

      The computation for the HDI is based on the code from Kruschke 2015, pp. 727f. For default sampling in Stan (4000 samples), the 90% intervals for HDI are more stable than, for instance, 95% intervals. An effective sample size of at least 10.000 is recommended if 95% intervals should be computed (see Kruschke 2015, p. 183ff).

      @@ -203,12 +203,12 @@

      Region of Practical Equivalence (ROPE)

      #> #> inside outside #> b_Intercept 0.0% 100.0% -#> b_e42dep2 43.7% 56.3% +#> b_e42dep2 43.3% 56.7% #> b_e42dep3 0.6% 99.4% #> b_e42dep4 0.0% 100.0% #> b_c12hour 100.0% 0.0% -#> b_c172code2 99.7% 0.3% -#> b_c172code3 78.2% 21.8% +#> b_c172code2 99.5% 0.5% +#> b_c172code3 77.5% 22.5% #> sigma 0.0% 100.0%

      rope() does not suggest limits for the region of practical equivalence and does not tell you how big is practically equivalent to the null value. However, there are suggestions how to choose reasonable limits (see Kruschke 2018), which are implemented in the equi_test() functions.

      @@ -226,20 +226,20 @@

      Test for Practical Equivalence

      #> Samples: 4000 #> #> H0 %inROPE HDI(95%) -#> b_Intercept (*) reject 0.00 [ 7.55 9.91] -#> b_e42dep2 (*) undecided 8.32 [ 0.07 2.06] -#> b_e42dep3 (*) reject 0.02 [ 1.34 3.32] -#> b_e42dep4 (*) reject 0.00 [ 2.79 4.93] +#> b_Intercept (*) reject 0.00 [ 7.54 9.80] +#> b_e42dep2 (*) undecided 8.78 [ 0.11 2.19] +#> b_e42dep3 (*) reject 0.00 [ 1.29 3.36] +#> b_e42dep4 (*) reject 0.00 [ 2.81 4.97] #> b_c12hour accept 100.00 [ 0.00 0.01] -#> b_c172code2 undecided 71.58 [-0.42 0.80] -#> b_c172code3 undecided 22.60 [-0.11 1.48] +#> b_c172code2 undecided 71.05 [-0.46 0.76] +#> b_c172code3 undecided 21.77 [-0.14 1.42] #> sigma reject 0.00 [ 3.41 3.75] #> #> (*) the number of effective samples may be insufficient for some parameters

      For models with binary outcome, there is no concrete way to derive the effect size that defines the ROPE limits. Two examples from Kruschke suggest that a negligible change is about .05 on the logit-scale. In these cases, it is recommended to specify the rope argument, however, if not specified, the ROPE limits are calculated in this way: 0 +/- .1 * sd(intercept) / 4. For all other models, 0 +/- .1 * sd(intercept) is used to determine the ROPE limits. These formulas are based on experience that worked well in real-life situations, but are most likely not generally the best approach.

      Beside a numerical output, the results can also be printed as HTML-table or plotted, using the out-argument. For plots, the 95% distributions of the posterior samles are shown, the ROPE is a light-blue shaded region in the plot, and the distributions are colored depending on whether the parameter values are accepted, rejected or undecided.

      equi_test(m5, out = "plot")
      -

      +

      Tidy Summary of Bayesian Models

      @@ -257,16 +257,16 @@

      Tidy Summary of Bayesian Models

      #> ## Conditional Model: #> #> estimate std.error HDI(89%) neff_ratio Rhat mcse -#> Intercept 1.26 0.74 [-0.34 2.66] 0.17 1.01 0.04 -#> child -1.15 0.10 [-1.29 -0.99] 1.00 1.00 0.00 -#> camper 0.73 0.09 [ 0.57 0.87] 1.00 1.00 0.00 +#> Intercept 1.25 0.77 [-0.39 2.66] 0.2 1.01 0.04 +#> child -1.15 0.10 [-1.31 -1.00] 1.0 1.00 0.00 +#> camper 0.73 0.09 [ 0.59 0.89] 1.0 1.00 0.00 #> #> ## Zero-Inflated Model: #> #> estimate std.error HDI(89%) neff_ratio Rhat mcse -#> Intercept -0.67 0.68 [-1.95 0.46] 0.40 1 0.02 -#> child 1.87 0.34 [ 1.36 2.41] 0.75 1 0.01 -#> camper -0.84 0.36 [-1.47 -0.28] 1.00 1 0.01
      +#> Intercept -0.67 0.72 [-1.95 0.58] 0.39 1 0.02 +#> child 1.87 0.34 [ 1.35 2.42] 0.78 1 0.01 +#> camper -0.84 0.36 [-1.44 -0.32] 0.82 1 0.01

      Additional statistics in the output are:

      • standard errors (which are actually median absolute deviations)
      • @@ -282,16 +282,16 @@

        Tidy Summary of Bayesian Models

        #> ## Conditional Model: #> #> estimate std.error HDI(89%) neff_ratio Rhat mcse -#> Intercept 1.26 0.74 [-0.34 2.66] 0.17 1.01 0.04 -#> child -1.15 0.10 [-1.29 -0.99] 1.00 1.00 0.00 -#> camper 0.73 0.09 [ 0.57 0.87] 1.00 1.00 0.00 +#> Intercept 1.25 0.77 [-0.39 2.66] 0.2 1.01 0.04 +#> child -1.15 0.10 [-1.31 -1.00] 1.0 1.00 0.00 +#> camper 0.73 0.09 [ 0.59 0.89] 1.0 1.00 0.00 #> #> ## Zero-Inflated Model: #> #> estimate std.error HDI(89%) neff_ratio Rhat mcse -#> Intercept -0.68 0.68 [-1.95 0.46] 0.40 1 0.02 -#> child 1.88 0.34 [ 1.36 2.41] 0.75 1 0.01 -#> camper -0.85 0.36 [-1.47 -0.28] 1.00 1 0.01 +#> Intercept -0.69 0.72 [-1.95 0.58] 0.39 1 0.02 +#> child 1.88 0.34 [ 1.35 2.42] 0.78 1 0.01 +#> camper -0.85 0.36 [-1.44 -0.32] 0.82 1 0.01

        To also show random effects of multilevel models, use the type-argument.

        # printing fixed and random effects of multilevel model
         tidy_stan(m3, type = "all")
        @@ -301,32 +301,32 @@ 

        Tidy Summary of Bayesian Models

        #> ## Conditional Model: Fixed effects #> #> estimate std.error HDI(89%) neff_ratio Rhat mcse -#> Intercept 1.26 0.74 [-0.34 2.66] 0.17 1.01 0.04 -#> child -1.15 0.10 [-1.29 -0.99] 1.00 1.00 0.00 -#> camper 0.73 0.09 [ 0.57 0.87] 1.00 1.00 0.00 +#> Intercept 1.25 0.77 [-0.39 2.66] 0.2 1.01 0.04 +#> child -1.15 0.10 [-1.31 -1.00] 1.0 1.00 0.00 +#> camper 0.73 0.09 [ 0.59 0.89] 1.0 1.00 0.00 #> #> ## Conditional Model: Random effect (Intercept: persons) #> #> estimate std.error HDI(89%) neff_ratio Rhat mcse -#> persons.1 -1.33 0.76 [-2.88 0.14] 0.18 1.01 0.04 -#> persons.2 -0.34 0.75 [-1.73 1.26] 0.17 1.01 0.04 -#> persons.3 0.35 0.74 [-1.01 1.98] 0.18 1.01 0.04 -#> persons.4 1.25 0.74 [-0.11 2.87] 0.17 1.01 0.04 +#> persons.1 -1.29 0.77 [-2.98 0.10] 0.22 1.01 0.03 +#> persons.2 -0.34 0.76 [-1.87 1.19] 0.22 1.01 0.03 +#> persons.3 0.37 0.76 [-1.14 1.91] 0.22 1.01 0.03 +#> persons.4 1.27 0.76 [-0.11 2.94] 0.20 1.01 0.03 #> #> ## Zero-Inflated Model: Fixed effects #> #> estimate std.error HDI(89%) neff_ratio Rhat mcse -#> Intercept -0.67 0.68 [-1.95 0.46] 0.40 1 0.02 -#> child 1.87 0.34 [ 1.36 2.41] 0.75 1 0.01 -#> camper -0.84 0.36 [-1.47 -0.28] 1.00 1 0.01 +#> Intercept -0.67 0.72 [-1.95 0.58] 0.39 1 0.02 +#> child 1.87 0.34 [ 1.35 2.42] 0.78 1 0.01 +#> camper -0.84 0.36 [-1.44 -0.32] 0.82 1 0.01 #> #> ## Zero-Inflated Model: Random effect (Intercept: persons) #> #> estimate std.error HDI(89%) neff_ratio Rhat mcse -#> persons.1 1.23 0.79 [ 0.01 2.65] 0.41 1 0.02 -#> persons.2 0.31 0.67 [-0.94 1.49] 0.41 1 0.02 -#> persons.3 -0.16 0.68 [-1.36 1.06] 0.40 1 0.02 -#> persons.4 -1.28 0.75 [-2.58 -0.07] 0.40 1 0.02
        +#> persons.1 1.25 0.81 [-0.16 2.62] 0.36 1 0.02 +#> persons.2 0.32 0.70 [-0.97 1.52] 0.39 1 0.02 +#> persons.3 -0.16 0.71 [-1.40 1.14] 0.38 1 0.02 +#> persons.4 -1.26 0.74 [-2.63 -0.03] 0.42 1 0.02

        By default, 89%-HDI are computed (a convention following McElreath 2015), but other or even multiple HDI can be computed using the prob argument.

        # two different HDI for multivariate response model
         tidy_stan(m2, prob = c(.5, .95))
        @@ -336,20 +336,20 @@ 

        Tidy Summary of Bayesian Models

        #> ## Response: jobseek #> #> estimate std.error HDI(50%) HDI(95%) neff_ratio Rhat mcse -#> Intercept 3.67 0.13 [ 3.59 3.76] [ 3.43 3.92] 1 1 0 -#> treat 0.07 0.05 [ 0.03 0.10] [-0.04 0.16] 1 1 0 -#> econ_hard 0.05 0.02 [ 0.04 0.07] [ 0.01 0.10] 1 1 0 -#> sex -0.01 0.05 [-0.04 0.03] [-0.10 0.09] 1 1 0 +#> Intercept 3.67 0.12 [ 3.58 3.74] [ 3.43 3.92] 1 1 0 +#> treat 0.06 0.05 [ 0.03 0.10] [-0.04 0.17] 1 1 0 +#> econ_hard 0.05 0.02 [ 0.04 0.07] [ 0.00 0.10] 1 1 0 +#> sex -0.01 0.05 [-0.05 0.02] [-0.10 0.09] 1 1 0 #> age 0.00 0.00 [ 0.00 0.01] [-0.00 0.01] 1 1 0 #> #> ## Response: depress2 #> #> estimate std.error HDI(50%) HDI(95%) neff_ratio Rhat mcse -#> Intercept 2.21 0.15 [ 2.11 2.31] [ 1.90 2.48] 1 1 0 +#> Intercept 2.21 0.15 [ 2.11 2.30] [ 1.92 2.49] 1 1 0 #> treat -0.04 0.04 [-0.07 -0.01] [-0.12 0.04] 1 1 0 #> joseek -0.24 0.03 [-0.26 -0.22] [-0.29 -0.18] 1 1 0 -#> econ_hard 0.15 0.02 [ 0.14 0.16] [ 0.11 0.19] 1 1 0 -#> sex 0.11 0.04 [ 0.08 0.14] [ 0.03 0.19] 1 1 0 +#> econ_hard 0.15 0.02 [ 0.13 0.16] [ 0.11 0.19] 1 1 0 +#> sex 0.11 0.04 [ 0.08 0.13] [ 0.02 0.19] 1 1 0 #> age 0.00 0.00 [-0.00 0.00] [-0.00 0.00] 1 1 0
        @@ -376,7 +376,7 @@

        Summary of Mediation Analysis

        #> Indirect effect: -0.02 [-0.04 0.01] #> Total effect: -0.06 [-0.13 0.02] #> -#> Proportion mediated: 27.09% [-68.72% 122.91%]
        +#> Proportion mediated: 26.87% [-73.93% 127.66%]

        Typically, mediation() finds the treatment and mediator variables automatically. If this does not work, use the treatment and mediator arguments to specify the related variable names. For all values, the 90% HDIs are calculated by default. Use prob to calculate a different interval.

        Here is a comparison with the mediation package. Note that the summary()-output of the mediation package shows the indirect effect first, followed by the direct effect.

        summary(m1)
        @@ -386,10 +386,10 @@ 

        Summary of Mediation Analysis

        #> Quasi-Bayesian Confidence Intervals #> #> Estimate 95% CI Lower 95% CI Upper p-value -#> ACME -0.0157 -0.0426 0.01 0.21 -#> ADE -0.0423 -0.1235 0.04 0.32 -#> Total Effect -0.0581 -0.1422 0.03 0.19 -#> Prop. Mediated 0.2292 -1.9299 2.67 0.34 +#> ACME -0.0150 -0.0422 0.01 0.25 +#> ADE -0.0381 -0.1199 0.04 0.34 +#> Total Effect -0.0531 -0.1357 0.03 0.23 +#> Prop. Mediated 0.2390 -1.7250 1.95 0.35 #> #> Sample Size Used: 899 #> @@ -409,7 +409,7 @@

        Summary of Mediation Analysis

        #> Indirect effect: -0.02 [-0.04 0.01] #> Total effect: -0.06 [-0.14 0.03] #> -#> Proportion mediated: 27.09% [-182.98% 237.16%]
        +#> Proportion mediated: 26.87% [-173.37% 227.1%]

        If you want to calculate mean instead of median values from the posterior samples, use the typical-argument. Furthermore, there is a print()-method, which allows to print more digits.

        mediation(m2, typical = "mean", prob = .95) %>% print(digits = 4)
         #> 
        @@ -420,11 +420,11 @@ 

        Summary of Mediation Analysis

        #> Response: depress2 #> #> Estimate HDI (95%) -#> Direct effect: -0.0401 [-0.1243 0.0442] -#> Indirect effect: -0.0158 [-0.0417 0.0079] -#> Total effect: -0.0559 [-0.1435 0.0324] +#> Direct effect: -0.0406 [-0.1231 0.0420] +#> Indirect effect: -0.0156 [-0.0422 0.0082] +#> Total effect: -0.0562 [-0.1387 0.0341] #> -#> Proportion mediated: 28.234% [-181.8368% 238.3049%]
        +#> Proportion mediated: 27.822% [-172.4126% 228.0565%]

        As you can see, the results are similar to what the mediation package produces for non-Bayesian models.

        @@ -436,7 +436,7 @@

        ICC for multilevel models

        #> Family: gaussian (identity) #> Formula: list() neg_c_7 ~ e42dep + c12hour + c172code + (1 | e15relat) list() #> -#> ICC (e15relat): 0.031161
        +#> ICC (e15relat): 0.030859

        One advantage of the brms package is that you can compute the ICC for each sample of the posterior distribution, which allows you to easily calculate uncertainty intervals.

        icc(m4, posterior = TRUE)
         #> 
        @@ -446,18 +446,18 @@ 

        ICC for multilevel models

        #> Formula: mpg ~ wt + hp + (1 | cyl) + (1 + wt | gear) #> #> ## cyl -#> ICC: 0.16 (HDI 89%: 0.00-0.71) -#> Between-group: 4.24 (HDI 89%: 0.00-36.46) +#> ICC: 0.13 (HDI 89%: 0.00-0.69) +#> Between-group: 4.37 (HDI 89%: 0.00-36.84) #> #> ## gear -#> ICC: 0.47 (HDI 89%: 0.00-0.90) -#> Between-group: 13.00 (HDI 89%: 0.00-86.61) +#> ICC: 0.57 (HDI 89%: 0.00-0.93) +#> Between-group: 17.91 (HDI 89%: 0.00-118.88) #> #> ## Residuals -#> Within-group: 6.20 (HDI 89%: 3.41-9.28) +#> Within-group: 6.06 (HDI 89%: 3.50-9.06) #> #> ## Random-slope-variance -#> gear: 1.31 (HDI 89%: 0.00-10.19) +#> gear: 1.75 (HDI 89%: 0.00-13.16) icc(m5, posterior = TRUE) #> @@ -468,10 +468,10 @@

        ICC for multilevel models

        #> #> ## e15relat #> ICC: 0.02 (HDI 89%: 0.00-0.09) -#> Between-group: 0.32 (HDI 89%: 0.00-1.27) +#> Between-group: 0.33 (HDI 89%: 0.00-1.26) #> #> ## Residuals -#> Within-group: 12.80 (HDI 89%: 11.80-13.81)
        +#> Within-group: 12.79 (HDI 89%: 11.78-13.82)

        There’s also a print()-method which allows you to specify the HDI-interval and digits:

        icc(m5, posterior = TRUE) %>% print(prob = .95, digits = 3)
         #> 
        @@ -481,19 +481,19 @@ 

        ICC for multilevel models

        #> Formula: neg_c_7 ~ e42dep + c12hour + c172code + (1 | e15relat) #> #> ## e15relat -#> ICC: 0.025 (HDI 95%: 0.000-0.134) -#> Between-group: 0.324 (HDI 95%: 0.000-1.961) +#> ICC: 0.025 (HDI 95%: 0.000-0.130) +#> Between-group: 0.325 (HDI 95%: 0.000-1.921) #> #> ## Residuals -#> Within-group: 12.797 (HDI 95%: 11.626-14.059)
        +#> Within-group: 12.788 (HDI 95%: 11.594-14.083)

        Bayes r-squared and LOO-adjusted r-squared

        r2() computes either the Bayes r-squared value, or - if loo = TRUE - a LOO-adjusted r-squared value (which comes conceptually closer to an adjusted r-squared measure).

        For the Bayes r-squared, the standard error is also reported. Note that r2() uses the median as measure of central tendency and the median absolute deviation as measure for variability.

        r2(m5)
        -#>       Bayes R2: 0.168
        -#> Standard Error: 0.022
        +#>       Bayes R2: 0.169
        +#> Standard Error: 0.021
         
         r2(m5, loo = TRUE)
         #> LOO-adjusted R2: 0.145
        diff --git a/inst/doc/mixedmodels-statistics.R b/inst/doc/mixedmodels-statistics.R index 71a2ba3c..b1aa29ff 100644 --- a/inst/doc/mixedmodels-statistics.R +++ b/inst/doc/mixedmodels-statistics.R @@ -11,6 +11,9 @@ data(sleepstudy) # fit linear mixed model m <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) +sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE) +m2 <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy) + ## ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- # Design effect for two-level model with 30 observations per # cluster group (level-2 unit) and an assumed intraclass @@ -43,11 +46,11 @@ scale_weights(nhanes_sample, SDMVSTRA, WTINT2YR) ## 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- # Using the t-statistics for calculating the p-value -p_value(m) +p_value(m2) # p-values based on conditional F-tests with # Kenward-Roger approximation for the degrees of freedom -p_value(m, p.kr = TRUE) +p_value(m2, p.kr = TRUE) ## ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- # get residual variance @@ -60,10 +63,6 @@ re_var(m) icc(m) ## ----message = 
TRUE----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -# fit 2nd model -sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE) -m2 <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy) - # calculate ICC for both models icc(m, m2) diff --git a/inst/doc/mixedmodels-statistics.Rmd b/inst/doc/mixedmodels-statistics.Rmd index ab9af56e..2fd9c587 100644 --- a/inst/doc/mixedmodels-statistics.Rmd +++ b/inst/doc/mixedmodels-statistics.Rmd @@ -37,6 +37,9 @@ data(sleepstudy) # fit linear mixed model m <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) + +sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE) +m2 <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy) ``` ## Sample Size Calculation for Mixed Models @@ -94,7 +97,7 @@ Most functions to fit multilevel and mixed effects models only allow to specify `scale_weights()` implements an algorithm proposed by Aaparouhov (2006) and Carle (2009) to rescale design weights in survey data to account for the grouping structure of multilevel models, which then can be used for multilevel modelling. -To calculate a weight-vector that can be used in multilevel models, `scale_weights()` needs that data frame with survey data as `x`-argument. 
This data frame should contain 1) a _cluster ID_ (argument `cluster.id`), which represents the _strata_ of the survey data (the level-2-cluster variable) and 2) the probability weights (argument `pweight`), which represents the design or sampling weights of the survey data (level-1-weight). +To calculate a weight-vector that can be used in multilevel models, `scale_weights()` needs the data frame with survey data as `x`-argument. This data frame should contain 1) a _cluster ID_ (argument `cluster.id`), which represents the _strata_ of the survey data (the level-2-cluster variable) and 2) the probability weights (argument `pweight`), which represents the design or sampling weights of the survey data (level-1-weight). `scale_weights()` then returns the original data frame, including two new variables: `svywght_a`, where the sample weights `pweight` are adjusted by a factor that represents the proportion of cluster size divided by the sum of sampling weights within each cluster. The adjustment factor for `svywght_b` is the sum of sample weights within each cluster devided by the sum of squared sample weights within each cluster (see Carle (2009), Appendix B, for details). @@ -105,17 +108,17 @@ scale_weights(nhanes_sample, SDMVSTRA, WTINT2YR) ## P-Values -For linear mixed models, the `summary()` in **lme4** does not report p-values. Other objects from regression functions are not consistent in their output structure when reporting p-values. `p_value()` aims at returning a standardized ("tidy") output for any regression model. The return value is always a data frame (a _tibble_) with three columns: _term_, _p.value_ and _std.error_, which represent the name, p-value and standard error for each term. +For linear mixed models, the `summary()` in **lme4** does not report p-values. Other objects from regression functions are not consistent in their output structure when reporting p-values. 
`p_value()` aims at returning a standardized ("tidy") output for any regression model. The return value is always a data frame with three columns: _term_, _p.value_ and _std.error_, which represent the name, p-value and standard error for each term. For linear mixed models, the approximation of p-values are more precise when `p.kr = TRUE`, based on conditional F-tests with Kenward-Roger approximation for the degrees of freedom (calling `pbkrtest::get_Lb_ddf()`). ```{r} # Using the t-statistics for calculating the p-value -p_value(m) +p_value(m2) # p-values based on conditional F-tests with # Kenward-Roger approximation for the degrees of freedom -p_value(m, p.kr = TRUE) +p_value(m2, p.kr = TRUE) ``` ## Random Effect Variances @@ -153,10 +156,6 @@ icc(m) You can also compute ICCs for multiple models at once: ```{r message = TRUE} -# fit 2nd model -sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE) -m2 <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy) - # calculate ICC for both models icc(m, m2) ``` diff --git a/inst/doc/mixedmodels-statistics.html b/inst/doc/mixedmodels-statistics.html index 2ec66c8a..72ac8a23 100644 --- a/inst/doc/mixedmodels-statistics.html +++ b/inst/doc/mixedmodels-statistics.html @@ -12,7 +12,7 @@ - + Statistics for Mixed Effects Models @@ -70,7 +70,7 @@

        Statistics for Mixed Effects Models

        Daniel Lüdecke

        -

        2018-06-05

        +

        2018-06-06

        @@ -93,7 +93,10 @@

        Statistics and Measures for Mixed Effects Models

        data(sleepstudy) # fit linear mixed model -m <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy)
        +m <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) + +sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE) +m2 <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy)

        Sample Size Calculation for Mixed Models

        The first two functions, deff() and smpsize_lmm(), can be used to approximately calculate the sample size in the context of power calculation. Calculating the sample size for simple linear models is pretty straightforward, however, for (linear) mixed models, statistical power is affected through the change of the variance of test statistics. This is what Hsieh et al. (2003) call a design effect (or variance inflation factor, VIF). Once this design effect is calculated, the sample size calculated for a standard design can be adjusted accordingly.

        @@ -151,7 +154,7 @@

        Trouble Shooting

        Rescale model weights for complex samples

        Most functions to fit multilevel and mixed effects models only allow to specify frequency weights, but not design (i.e. sampling or probability) weights, which should be used when analyzing complex samples and survey data.

        scale_weights() implements an algorithm proposed by Asparouhov (2006) and Carle (2009) to rescale design weights in survey data to account for the grouping structure of multilevel models, which then can be used for multilevel modelling.

        -

        To calculate a weight-vector that can be used in multilevel models, scale_weights() needs that data frame with survey data as x-argument. This data frame should contain 1) a cluster ID (argument cluster.id), which represents the strata of the survey data (the level-2-cluster variable) and 2) the probability weights (argument pweight), which represents the design or sampling weights of the survey data (level-1-weight).

        +

        To calculate a weight-vector that can be used in multilevel models, scale_weights() needs the data frame with survey data as x-argument. This data frame should contain 1) a cluster ID (argument cluster.id), which represents the strata of the survey data (the level-2-cluster variable) and 2) the probability weights (argument pweight), which represents the design or sampling weights of the survey data (level-1-weight).

        scale_weights() then returns the original data frame, including two new variables: svywght_a, where the sample weights pweight are adjusted by a factor that represents the proportion of cluster size divided by the sum of sampling weights within each cluster. The adjustment factor for svywght_b is the sum of sample weights within each cluster divided by the sum of squared sample weights within each cluster (see Carle (2009), Appendix B, for details).

        data(nhanes_sample)
         scale_weights(nhanes_sample, SDMVSTRA, WTINT2YR)
        @@ -172,20 +175,20 @@ 

        Rescale model weights for complex samples

        P-Values

        -

        For linear mixed models, the summary() in lme4 does not report p-values. Other objects from regression functions are not consistent in their output structure when reporting p-values. p_value() aims at returning a standardized (“tidy”) output for any regression model. The return value is always a data frame (a tibble) with three columns: term, p.value and std.error, which represent the name, p-value and standard error for each term.

        +

        For linear mixed models, the summary() in lme4 does not report p-values. Other objects from regression functions are not consistent in their output structure when reporting p-values. p_value() aims at returning a standardized (“tidy”) output for any regression model. The return value is always a data frame with three columns: term, p.value and std.error, which represent the name, p-value and standard error for each term.

        For linear mixed models, the approximation of p-values is more precise when p.kr = TRUE, based on conditional F-tests with Kenward-Roger approximation for the degrees of freedom (calling pbkrtest::get_Lb_ddf()).

        # Using the t-statistics for calculating the p-value
        -p_value(m)
        +p_value(m2)
         #>          term p.value std.error
        -#> 1 (Intercept)       0     6.825
        -#> 2        Days       0     1.546
        +#> 1 (Intercept)       0     9.879
        +#> 2        Days       0     0.796
         
         # p-values based on conditional F-tests with 
         # Kenward-Roger approximation for the degrees of freedom
        -p_value(m, p.kr = TRUE)
        +p_value(m2, p.kr = TRUE)
         #>          term p.value std.error
        -#> 1 (Intercept)       0     6.825
        -#> 2        Days       0     1.546
        +#> 1 (Intercept) 0 9.893 +#> 2 Days 0 0.804

        Random Effect Variances

        @@ -224,11 +227,7 @@

        Intraclass-Correlation Coefficient

        #> #> ICC (Subject): 0.483090

        You can also compute ICCs for multiple models at once:

        -
        # fit 2nd model
        -sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE)
        -m2 <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy)
        -
        -# calculate ICC for both models
        +
        # calculate ICC for both models
         icc(m, m2)
         #> Caution! ICC for random-slope-intercept models usually not meaningful. See 'Note' in `?icc`.
         #> [[1]]
        @@ -245,8 +244,8 @@ 

        Intraclass-Correlation Coefficient

        #> Family: gaussian (identity) #> Formula: Reaction ~ Days + (1 | mygrp) + (1 | Subject) #> -#> ICC (mygrp): 0.033205 -#> ICC (Subject): 0.594161
        +#> ICC (mygrp): 0.032401 +#> ICC (Subject): 0.594289
        diff --git a/man/check_assumptions.Rd b/man/check_assumptions.Rd index f08a63c8..2277893b 100644 --- a/man/check_assumptions.Rd +++ b/man/check_assumptions.Rd @@ -85,7 +85,8 @@ These functions are wrappers that compute various test statistics, < 0.05 indicates a significant deviation from normal distribution. Note that this formal test almost always yields significant results for the distribution of residuals and visual inspection (e.g. qqplots) - are preferable (see \code{\link[sjPlot]{sjp.lm}} with \code{type = "ma"}). + are preferable (see \code{\link[sjPlot]{plot_model}} with + \code{type = "diag"}). \cr \cr \code{multicollin()} wraps \code{\link[car]{vif}} and returns the logical result as tibble. \code{TRUE}, if multicollinearity @@ -104,7 +105,7 @@ These functions are wrappers that compute various test statistics, These formal tests are very strict and in most cases violation of model assumptions are alerted, though the model is actually ok. It is preferable to check model assumptions based on visual inspection - (see \code{\link[sjPlot]{sjp.lm}} with \code{type = "ma"}). + (see \code{\link[sjPlot]{plot_model}} with \code{type = "diag"}). } \examples{ data(efc) diff --git a/man/cod.Rd b/man/cod.Rd index 665479cd..b8d100c0 100644 --- a/man/cod.Rd +++ b/man/cod.Rd @@ -7,7 +7,7 @@ cod(x) } \arguments{ -\item{x}{Fitted \code{\link{glm}} or \code{\link[lme4]{glmer}} model.} +\item{x}{Fitted \code{\link[stats]{glm}} or \code{\link[lme4]{glmer}} model.} } \value{ The \code{D} Coefficient of Discrimination, also known as diff --git a/man/mean_n.Rd b/man/mean_n.Rd index b1721109..89081ea3 100644 --- a/man/mean_n.Rd +++ b/man/mean_n.Rd @@ -14,19 +14,19 @@ mean_n(dat, n, digits = 2) \item a numeric value that indicates the amount of valid values per row to calculate the row mean; \item or a value between 0 and 1, indicating a proportion of valid values per row to calculate the row mean (see 'Details'). 
} -If a row's sum of valid values is less than \code{n}, \code{\link{NA}} will be returned as row mean value.} +If a row's sum of valid values is less than \code{n}, \code{NA} will be returned as row mean value.} \item{digits}{Numeric value indicating the number of decimal places to be used for rounding mean value. Negative values are allowed (see ‘Details’).} } \value{ A vector with row mean values of \code{df} for those rows with at least \code{n} - valid values. Else, \code{\link{NA}} is returned. + valid values. Else, \code{NA} is returned. } \description{ This function is similar to the SPSS \code{MEAN.n} function and computes - row means from a \code{\link{data.frame}} or \code{\link{matrix}} if at least \code{n} - values of a row are valid (and not \code{\link{NA}}). + row means from a \code{data.frame} or \code{matrix} if at least \code{n} + values of a row are valid (and not \code{NA}). } \details{ Rounding to a negative number of \code{digits} means rounding to a power of diff --git a/man/mwu.Rd b/man/mwu.Rd index d1bbf382..90d7fb84 100644 --- a/man/mwu.Rd +++ b/man/mwu.Rd @@ -38,7 +38,7 @@ functions). May be abbreviated.} } \description{ This function performs a Mann-Whitney-U-Test (or Wilcoxon rank sum test, - see \code{\link{wilcox.test}} and \code{\link[coin]{wilcox_test}}) + see \code{\link[stats]{wilcox.test}} and \code{\link[coin]{wilcox_test}}) for \code{x}, for each group indicated by \code{grp}. If \code{grp} has more than two categories, a comparison between each combination of two groups is performed. \cr \cr diff --git a/man/p_value.Rd b/man/p_value.Rd index 19f98c79..afc1f251 100644 --- a/man/p_value.Rd +++ b/man/p_value.Rd @@ -16,7 +16,7 @@ conditional F-tests with Kenward-Roger approximation for the df (see 'Details').} } \value{ -A \code{\link{tibble}} with the model coefficients' names (\code{term}), +A \code{data.frame} with the model coefficients' names (\code{term}), p-values (\code{p.value}) and standard errors (\code{std.error}). 
} \description{ @@ -33,6 +33,10 @@ For linear mixed models (\code{lmerMod}-objects), the computation of \cr \cr If p-values already have been computed (e.g. for \code{merModLmerTest}-objects from the \CRANpkg{lmerTest}-package), these will be returned. + \cr \cr + The \code{print()}-method has a \code{summary}-argument, that - in + case \code{p.kr = TRUE} - also prints information on the approximated + degrees of freedom (see 'Examples'). } \examples{ data(efc) @@ -48,7 +52,8 @@ p_value(fit) # lme4-fit library(lme4) -fit <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) +sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE) +fit <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy) pv <- p_value(fit, p.kr = TRUE) # normal output diff --git a/man/rmse.Rd b/man/rmse.Rd index a4fea52f..e62d9ac4 100644 --- a/man/rmse.Rd +++ b/man/rmse.Rd @@ -13,8 +13,8 @@ rse(fit) mse(fit) } \arguments{ -\item{fit}{Fitted linear model of class \code{\link{lm}}, -\code{\link[lme4]{merMod}} (\pkg{lme4}) or \code{\link[nlme]{lme}} (\pkg{nlme}).} +\item{fit}{Fitted linear model of class \code{lm}, \code{merMod} (\pkg{lme4}) +or \code{lme} (\pkg{nlme}).} \item{normalized}{Logical, use \code{TRUE} if normalized rmse should be returned.} } diff --git a/man/std_beta.Rd b/man/std_beta.Rd index 3b22f45d..4685f53f 100644 --- a/man/std_beta.Rd +++ b/man/std_beta.Rd @@ -7,8 +7,8 @@ std_beta(fit, type = "std", ci.lvl = 0.95) } \arguments{ -\item{fit}{Fitted linear (mixed) model of class \code{lm} or -\code{\link[lme4]{merMod}} (\CRANpkg{lme4} package).} +\item{fit}{Fitted linear (mixed) model of class \code{lm} or \code{merMod} +(\CRANpkg{lme4} package).} \item{type}{If \code{fit} is of class \code{lm}, normal standardized coefficients are computed by default. 
Use \code{type = "std2"} to follow @@ -44,7 +44,7 @@ For \code{\link[nlme]{gls}}-objects, standardized beta coefficients may be wrong \cr \cr and \cr \cr \code{head(model.matrix(nlme::gls(neg_c_7 ~ as.factor(e42dep), data = efc, na.action = na.omit)))}. \cr \cr - In such cases, use \code{\link{to_dummy}} to create dummies from + In such cases, use \code{\link[sjmisc]{to_dummy}} to create dummies from factors. } \examples{ diff --git a/man/table_values.Rd b/man/table_values.Rd index f5ee774a..71ae3d2a 100644 --- a/man/table_values.Rd +++ b/man/table_values.Rd @@ -7,9 +7,10 @@ table_values(tab, digits = 2) } \arguments{ -\item{tab}{Simple \code{\link{table}} or \code{\link{ftable}} of which cell, row and column percentages -as well as expected values are calculated. Tables of class \code{\link{xtabs}} and other will -be coerced to \code{\link{ftable}} objects.} +\item{tab}{Simple \code{\link{table}} or \code{\link[stats]{ftable}} of which +cell, row and column percentages as well as expected values are calculated. +Tables of class \code{\link[stats]{xtabs}} and other will be coerced to +\code{ftable} objects.} \item{digits}{Amount of digits for the table percentage values.} } diff --git a/man/weight.Rd b/man/weight.Rd index b9bd2a8c..c907341b 100644 --- a/man/weight.Rd +++ b/man/weight.Rd @@ -30,7 +30,7 @@ These functions weight the variable \code{x} by \details{ \code{weight2()} sums up all \code{weights} values of the associated categories of \code{x}, whereas \code{weight()} uses a - \code{\link{xtabs}} formula to weight cases. Thus, \code{weight()} + \code{\link[stats]{xtabs}} formula to weight cases. Thus, \code{weight()} may return a vector of different length than \code{x}. 
} \note{ diff --git a/man/xtab_statistics.Rd b/man/xtab_statistics.Rd index e634eeb2..89e16846 100644 --- a/man/xtab_statistics.Rd +++ b/man/xtab_statistics.Rd @@ -14,8 +14,8 @@ xtab_statistics(data, x1 = NULL, x2 = NULL, statistics = c("auto", "cramer", "phi", "spearman", "kendall", "pearson"), ...) } \arguments{ -\item{tab}{A \code{\link{table}} or \code{\link{ftable}}. Tables of class -\code{\link{xtabs}} and other will be coerced to \code{\link{ftable}} +\item{tab}{A \code{\link{table}} or \code{\link[stats]{ftable}}. Tables of class +\code{\link[stats]{xtabs}} and other will be coerced to \code{ftable} objects.} \item{data}{A data frame or a table object. If a table object, \code{x1} and diff --git a/vignettes/mixedmodels-statistics.Rmd b/vignettes/mixedmodels-statistics.Rmd index ab9af56e..2fd9c587 100644 --- a/vignettes/mixedmodels-statistics.Rmd +++ b/vignettes/mixedmodels-statistics.Rmd @@ -37,6 +37,9 @@ data(sleepstudy) # fit linear mixed model m <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy) + +sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE) +m2 <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy) ``` ## Sample Size Calculation for Mixed Models @@ -94,7 +97,7 @@ Most functions to fit multilevel and mixed effects models only allow to specify `scale_weights()` implements an algorithm proposed by Aaparouhov (2006) and Carle (2009) to rescale design weights in survey data to account for the grouping structure of multilevel models, which then can be used for multilevel modelling. -To calculate a weight-vector that can be used in multilevel models, `scale_weights()` needs that data frame with survey data as `x`-argument. This data frame should contain 1) a _cluster ID_ (argument `cluster.id`), which represents the _strata_ of the survey data (the level-2-cluster variable) and 2) the probability weights (argument `pweight`), which represents the design or sampling weights of the survey data (level-1-weight). 
+To calculate a weight-vector that can be used in multilevel models, `scale_weights()` needs the data frame with survey data as `x`-argument. This data frame should contain 1) a _cluster ID_ (argument `cluster.id`), which represents the _strata_ of the survey data (the level-2-cluster variable) and 2) the probability weights (argument `pweight`), which represents the design or sampling weights of the survey data (level-1-weight). `scale_weights()` then returns the original data frame, including two new variables: `svywght_a`, where the sample weights `pweight` are adjusted by a factor that represents the proportion of cluster size divided by the sum of sampling weights within each cluster. The adjustment factor for `svywght_b` is the sum of sample weights within each cluster devided by the sum of squared sample weights within each cluster (see Carle (2009), Appendix B, for details). @@ -105,17 +108,17 @@ scale_weights(nhanes_sample, SDMVSTRA, WTINT2YR) ## P-Values -For linear mixed models, the `summary()` in **lme4** does not report p-values. Other objects from regression functions are not consistent in their output structure when reporting p-values. `p_value()` aims at returning a standardized ("tidy") output for any regression model. The return value is always a data frame (a _tibble_) with three columns: _term_, _p.value_ and _std.error_, which represent the name, p-value and standard error for each term. +For linear mixed models, the `summary()` in **lme4** does not report p-values. Other objects from regression functions are not consistent in their output structure when reporting p-values. `p_value()` aims at returning a standardized ("tidy") output for any regression model. The return value is always a data frame with three columns: _term_, _p.value_ and _std.error_, which represent the name, p-value and standard error for each term. 
For linear mixed models, the approximation of p-values are more precise when `p.kr = TRUE`, based on conditional F-tests with Kenward-Roger approximation for the degrees of freedom (calling `pbkrtest::get_Lb_ddf()`). ```{r} # Using the t-statistics for calculating the p-value -p_value(m) +p_value(m2) # p-values based on conditional F-tests with # Kenward-Roger approximation for the degrees of freedom -p_value(m, p.kr = TRUE) +p_value(m2, p.kr = TRUE) ``` ## Random Effect Variances @@ -153,10 +156,6 @@ icc(m) You can also compute ICCs for multiple models at once: ```{r message = TRUE} -# fit 2nd model -sleepstudy$mygrp <- sample(1:45, size = 180, replace = TRUE) -m2 <- lmer(Reaction ~ Days + (1 | mygrp) + (1 | Subject), sleepstudy) - # calculate ICC for both models icc(m, m2) ```