Merged
Changes from all commits
29 changes: 19 additions & 10 deletions R/standardErrorOfMeasurement.R
@@ -77,7 +77,7 @@ standardErrorOfMeasurement <- function(jaspResults, dataset, options) {
exitAnalysisIfErrors = TRUE)

if (options[["lord2"]] && options[["lord2NumberOfSplits"]] == "") {
.quitAnalysis(gettext("For the Lord's compound method, the test could not be split in equally sized parts with more than 1 item per part. Consider adding or removing items."))
.quitAnalysis(gettext("For the Lord generalized method, the test could not be split in equally sized parts with more than 1 item per part. Consider adding or removing items."))
}
}

@@ -128,7 +128,7 @@ standardErrorOfMeasurement <- function(jaspResults, dataset, options) {
# at least one method is selected
for (i in 1:length(selected)) {
if (is.na(selected[[i]][["name"]])) {
.quitAnalysis(gettext("The Lord, Keats, and Lord's compound method are only available for binary data."))
.quitAnalysis(gettext("The Lord, Keats, and Lord generalized method are only available for binary data."))
}
if (is.null(jaspResults[["semMainContainer"]][[paste0(method[i], "State")]])) {
out <- eval(parse(text = selected[[i]][["funString"]]))
@@ -485,15 +485,15 @@ standardErrorOfMeasurement <- function(jaspResults, dataset, options) {
pl <- ggplot2::ggplot(dat) +
ggplot2::geom_point(ggplot2::aes(x = score, y = tscore), size = 2.5) +
ggplot2::geom_errorbar(ggplot2::aes(x = score, ymin = lower, ymax = upper), width = 0.5) +
ggplot2::labs(x = gettext("Sum Score"), y = gettext("True Score"))
ggplot2::labs(x = gettext("Sum Score"), y = gettext("True Score Estimate"))
} else {
dat <- as.data.frame(ciData)
colnames(dat) <- c("score", "lower", "upper")
dat$tscore <- dat$score
pl <- ggplot2::ggplot(dat) +
ggplot2::geom_ribbon(ggplot2::aes(x = score, ymin = lower, ymax = upper), fill = "grey80") +
ggplot2::geom_line(ggplot2::aes(x = score, y = tscore)) +
ggplot2::labs(x = gettext("Sum Score"), y = gettext("True Score"))
ggplot2::labs(x = gettext("Sum Score"), y = gettext("True Score Estimate"))
}
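# Added note (not part of the diff): in this else-branch the identity
# tscore = score is used, so the ribbon is drawn around the sum score itself
# as the true score estimate. A common regressed alternative is Kelley's
# estimate, sketched here only as an aside (not necessarily what the ciData
# branch above supplies):
# kelley <- function(x, rel) rel * x + (1 - rel) * mean(x)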

if (!is.na(cutoff)) {
@@ -539,8 +539,19 @@ standardErrorOfMeasurement <- function(jaspResults, dataset, options) {
out <- .semPrepareOutMatrix(ncol(X), nc, scoresObj)
fun <- function(partSUMS, ind, cc) {
K <- ncol(partSUMS)
mean_diff <- partSUMS[ind, ] - rowMeans(partSUMS[ind, ]) - matrix(colMeans(partSUMS[ind, ]), length(ind), K, TRUE) + mean(partSUMS[ind, ])
ret <- sqrt(d * sum(rowSums(mean_diff^2) / (K - 1)) / length(ind))
# mean_diff <- partSUMS[ind, ] - rowMeans(partSUMS[ind, ]) - matrix(colMeans(partSUMS[ind, ]), length(ind), K, TRUE) + mean(partSUMS[ind, ])
# ret <- sqrt(d * sum(rowSums(mean_diff^2) / (K - 1)) / length(ind))

col_means <- colMeans(partSUMS) # Global marginal means (X_j)
grand_mean <- mean(col_means) # Global grand mean (M)
row_means <- rowMeans(partSUMS[ind, ]) # Person means (X_i)

# Construct Feldt's deviation term
mean_diff <- partSUMS[ind, ] -
matrix(row_means, length(ind), K, byrow = FALSE) -
matrix(col_means - grand_mean, length(ind), K, byrow = TRUE)

ret <- sqrt( d * sum(mean_diff^2) / ((K - 1) * length(ind)) )
return(ret)
}
out <- .semComputeWithCaseMin(out, S, caseMin, partSUMS, fun)
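# Added toy check (not part of the diff): with ind covering all rows, the
# deviation term above reduces to the classic person-by-part interaction
# residual x_ij - xbar_i. - xbar_.j + xbar_.., pooled exactly as in fun().
set.seed(1)
toy <- matrix(rbinom(40, 1, 0.6), nrow = 10, ncol = 4) # 10 persons, 4 parts
dev <- toy - rowMeans(toy) - matrix(colMeans(toy) - mean(toy), 10, 4, byrow = TRUE)
sum(dev^2) / ((ncol(toy) - 1) * nrow(toy)) # fun() scales this by d and takes the sqrt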
@@ -557,9 +568,7 @@ standardErrorOfMeasurement <- function(jaspResults, dataset, options) {
out <- .semPrepareOutMatrix(ncol(X), nc, scoresObj)
scores <- out[, 1]

rawDiffK <- d *
rowSums((partSUMS - matrix(colMeans(partSUMS), N, K, TRUE) - rowMeans(partSUMS) + mean(partSUMS))^2) /
(K - 1)
rawDiffK <- d * rowSums((partSUMS - matrix(colMeans(partSUMS), N, K, TRUE) - rowMeans(partSUMS) + mean(partSUMS))^2) / (K - 1)
betaK <- coef(lm(rawDiffK ~ poly(S, n_poly, raw = TRUE)))
scrs <- sqrt(betaK[1] + rowSums(matrix(betaK[-1], length(scores), n_poly, TRUE) * poly(scores, n_poly, raw = TRUE)))
out[, 2] <- scrs
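# Added toy sketch (not the module's code): the Mollenkopf-Feldt idea above is
# to smooth the conditional error variance by regressing it on a polynomial in
# the sum score and taking the square root of the fitted values.
set.seed(2)
SToy <- rowSums(matrix(rbinom(200, 1, 0.5), 50, 4)) # toy sum scores
vToy <- (1.2 + 0.1 * SToy + rnorm(50, sd = 0.2))^2 # toy squared differences
bToy <- coef(lm(vToy ~ poly(SToy, 2, raw = TRUE)))
semToy <- sqrt(bToy[1] + bToy[2] * SToy + bToy[3] * SToy^2) # smoothed conditional SEM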
@@ -882,7 +891,7 @@ standardErrorOfMeasurement <- function(jaspResults, dataset, options) {
funString = NA,
dependencies = NA)),
lord2 = switch(as.character(nc),
"2" = list(name = "Lord's compound",
"2" = list(name = "Lord generalized",
funString = ".semLord2(dataset, as.numeric(options$lord2NumberOfSplits), scrs, options$minimumGroupSize)",
dependencies = c("lord2", "lord2NumberOfSplits", "minimumGroupSize")),
list(name = NA,
8 changes: 4 additions & 4 deletions R/unidimensionalReliabilityBayesian.R
@@ -75,13 +75,13 @@ unidimensionalReliabilityBayesian <- function(jaspResults, dataset, options) {
itemDroppedSelectedItem = unlist(options[c("itemDeletedOmega", "itemDeletedAlpha", "itemDeletedLambda2", "itemDeletedSplithalf")]),

namesEstimators = list(
tables = c("Coefficient \u03C9", "Coefficient \u03B1", "Guttman's \u03BB2", "Split-half coefficient",
tables = c("McDonald's \u03C9", "Cronbach's \u03B1", "Guttman's \u03BB2", "Split-half coefficient",
"Average interitem correlation", "Mean", "Variance", "SD"),
tables_item = c("Coefficient \u03C9", "Coefficient \u03B1", "Guttman's \u03BB2", gettext("Split-half coefficient"),
tables_item = c("McDonald's \u03C9", "Cronbach's \u03B1", "Guttman's \u03BB2", gettext("Split-half coefficient"),
gettext("Item-rest correlation"), gettext("Mean"), gettext("Variance"), gettext("SD")),
coefficients = c("Coefficient \u03C9", "Coefficient \u03B1", "Guttman's \u03BB2", gettext("Split-half coefficient"),
coefficients = c("McDonald's \u03C9", "Cronbach's \u03B1", "Guttman's \u03BB2", gettext("Split-half coefficient"),
gettext("Item-rest correlation")),
plots = list(expression("Coefficient"~omega), expression("Cronbach\'s"~alpha), expression("Guttman's"~lambda[2]),
plots = list(expression("McDonald\'s"~omega), expression("Cronbach\'s"~alpha), expression("Guttman\'s"~lambda[2]),
gettext("Split-half coefficient")),
plotsNoGreek = c("omega", "alpha", "lambda2", "splithalf")
)
53 changes: 28 additions & 25 deletions R/unidimensionalReliabilityFrequentist.R
@@ -2,6 +2,7 @@
#' @export
unidimensionalReliabilityFrequentist <- function(jaspResults, dataset, options) {


Comment on lines 4 to +5
Copilot AI (Sep 26, 2025):
[nitpick] The added blank line at line 5 appears unnecessary and doesn't improve code readability.
Suggested change: remove the added blank line.
# check for listwise deletion
datasetOld <- dataset
dataset <- .handleData(datasetOld, options)
@@ -54,11 +55,11 @@ unidimensionalReliabilityFrequentist <- function(jaspResults, dataset, options)
itemDroppedSelected = unlist(options[c("itemDeletedOmega", "itemDeletedAlpha", "itemDeletedLambda2", "itemDeletedSplithalf",
"itemRestCorrelation", "itemMean", "itemVar", "itemSd")]),
namesEstimators = list(
tables = c("Coefficient \u03C9", "Coefficient \u03B1", "Guttman's \u03BB2", gettext("Split-half coefficient"),
tables = c("McDonald's \u03C9", "Cronbach's \u03B1", "Guttman's \u03BB2", gettext("Split-half coefficient"),
gettext("Average interitem correlation"), gettext("Mean"), gettext("Variance"), gettext("SD")),
tables_item = c("Coefficient \u03C9", "Coefficient \u03B1", "Guttman's \u03BB2", gettext("Split-half coefficient"),
tables_item = c("McDonald's \u03C9", "Cronbach's \u03B1", "Guttman's \u03BB2", gettext("Split-half coefficient"),
gettext("Item-rest correlation"), gettext("Mean"), gettext("Variance"), gettext("SD")),
coefficientsDeleted = c("Coefficient \u03C9", "Coefficient \u03B1", "Guttman's \u03BB2", gettext("Split-half coefficient")))
coefficientsDeleted = c("McDonald's \u03C9", "Cronbach's \u03B1", "Guttman's \u03BB2", gettext("Split-half coefficient")))
)

return(derivedOptions)
@@ -952,8 +953,8 @@ unidimensionalReliabilityFrequentist <- function(jaspResults, dataset, options)
out[["conf"]][["scaleSplithalf"]] <- quantile(samp, probs = c((1 - ciValue) / 2, 1 - (1 - ciValue) / 2), na.rm = TRUE)
out[["se"]][["scaleSplithalf"]] <- sd(samp, na.rm = TRUE)
} else { # interval analytic
partSums1 <- rowSums(dtUse[, splits[[1]]])
partSums2 <- rowSums(dtUse[, splits[[2]]])
partSums1 <- rowSums(dtUse[, splits[[1]], drop = FALSE])
partSums2 <- rowSums(dtUse[, splits[[2]], drop = FALSE])

out[["se"]][["scaleSplithalf"]] <- .seSplithalf(partSums1, partSums2, model[["use.cases"]])
out[["conf"]][["scaleSplithalf"]] <- out[["est"]][["scaleSplithalf"]] + c(-1, 1) * out[["se"]][["scaleSplithalf"]] * qnorm(1 - (1 - ciValue) / 2)
@@ -1168,31 +1169,33 @@ unidimensionalReliabilityFrequentist <- function(jaspResults, dataset, options)
out[["est"]][["itemDeletedSplithalf"]] <- c(NA, NA)
out[["lower"]][["itemDeletedSplithalf"]] <- c(NA, NA)
out[["upper"]][["itemDeletedSplithalf"]] <- c(NA, NA)
}

for (i in seq_len(ncol(dtUse))) {
dtCut <- dtUse[, -i, drop = FALSE]
nit <- ncol(dtCut)
splits <- split(seq_len(nit), 1:2)
est <- .splithalfData(dtCut, splits = splits, useCase = model[["use.cases"]])
out[["est"]][["itemDeletedSplithalf"]][i] <- est
} else {
for (i in seq_len(ncol(dtUse))) {
dtCut <- dtUse[, -i, drop = FALSE]
nit <- ncol(dtCut)
splits <- split(seq_len(nit), 1:2)
est <- .splithalfData(dtCut, splits = splits, useCase = model[["use.cases"]])
out[["est"]][["itemDeletedSplithalf"]][i] <- est

if (options[["intervalMethod"]] == "analytic") {
if (options[["intervalMethod"]] == "analytic") {

partSums1 <- rowSums(dtCut[, splits[[1]]])
partSums2 <- rowSums(dtCut[, splits[[2]]])
partSums1 <- rowSums(dtCut[, splits[[1]]])
partSums2 <- rowSums(dtCut[, splits[[2]]])

se <- .seSplithalf(partSums1, partSums2, model[["use.cases"]])
conf <- est + c(-1, 1) * se * qnorm(1 - (1 - ciValue) / 2)
out[["lower"]][["itemDeletedSplithalf"]][i] <- conf[1]
out[["upper"]][["itemDeletedSplithalf"]][i] <- conf[2]
} else {
itemSamp <- model[["itemDeletedSplithalf"]][["itemSamp"]]
conf <- quantile(itemSamp[, i], probs = c((1 - ciValue) / 2, 1 - (1 - ciValue) / 2), na.rm = TRUE)
out[["lower"]][["itemDeletedSplithalf"]][i] <- conf[1]
out[["upper"]][["itemDeletedSplithalf"]][i] <- conf[2]
se <- .seSplithalf(partSums1, partSums2, model[["use.cases"]])
conf <- est + c(-1, 1) * se * qnorm(1 - (1 - ciValue) / 2)
out[["lower"]][["itemDeletedSplithalf"]][i] <- conf[1]
out[["upper"]][["itemDeletedSplithalf"]][i] <- conf[2]
} else {
itemSamp <- model[["itemDeletedSplithalf"]][["itemSamp"]]
conf <- quantile(itemSamp[, i], probs = c((1 - ciValue) / 2, 1 - (1 - ciValue) / 2), na.rm = TRUE)
out[["lower"]][["itemDeletedSplithalf"]][i] <- conf[1]
out[["upper"]][["itemDeletedSplithalf"]][i] <- conf[2]
}
}
}


}

Comment on lines +1197 to 1200
Copilot AI (Sep 26, 2025):
[nitpick] The added blank lines around the closing brace appear unnecessary and don't improve code readability.
Suggested change:
}
}
# item-rest correlation
6 changes: 3 additions & 3 deletions inst/Description.qml
@@ -16,7 +16,7 @@ Description

Analysis
{
title: qsTr("Unidimensional Reliability")
title: qsTr("Reliability")
qml: "UnidimensionalReliabilityFrequentist.qml"
func: "unidimensionalReliabilityFrequentist"
}
@@ -57,8 +57,8 @@ Description
}
Analysis
{
menu: qsTr("Unidimensional Reliability")
title: qsTr("Bayesian Unidimensional Reliability")
menu: qsTr("Reliability")
title: qsTr("Bayesian Reliability")
qml: "UnidimensionalReliabilityBayesian.qml"
func: "unidimensionalReliabilityBayesian"
}
2 changes: 1 addition & 1 deletion inst/help/standardErrorOfMeasurement.md
@@ -23,7 +23,7 @@ The Standard Error of Measurement (sem) quantifies the precision of a measuremen
- Binomial methods: Based on the idea that the item scores follow a binomial distribution
- Lord: Method that only requires number of correct and incorrect items
- Keats: Corrects the Lord method for supposed bias and uses a reliability coefficient in the process
- Lord's compound: Essentially the Lord method for multiple test parts
- Lord generalized: Essentially the Lord method for multiple test parts
- Number of splits: How many splits to apply; must be a divisor of the number of items (see the sketch below)
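As a minimal sketch of the binomial idea above (assuming Lord's 1955 formula; the Keats correction and the module's exact generalized variant are not reproduced here):

    # Lord's binomial SEM for a sum score x on a test of n binary items
    lordSem <- function(x, n) sqrt(x * (n - x) / (n - 1))
    lordSem(x = 15, n = 20)
    # One hedged reading of the generalized method: pool the binomial error
    # variance over test parts with part scores xk and part lengths nk
    lordGeneralizedSem <- function(xk, nk) sqrt(sum(xk * (nk - xk) / (nk - 1)))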

### Options
4 changes: 2 additions & 2 deletions inst/help/unidimensionalReliabilityBayesian.md
@@ -13,8 +13,8 @@ The Bayesian unidimensional reliability analysis allows the user to test the sca
### Scale Statistics
- The CTT-coefficients alpha, lambda 2, and the split-half coefficient are computed from the data covariance matrix. Coefficient omega is computed from the centered data matrix.
- Credible interval: default is 95%
- Coefficient omega: The same as McDonald's omega (for unidimensional data, based on the single-factor model). Note the total test variance in the denominator of the reliability equation is the model implied total variance, that is, the summed model implied covariance matrix.
- Coefficient alpha: The same as Cronbach's alpha (for binary items the coefficient equals KR20)
- McDonald's omega (for unidimensional data, based on the single-factor model). Note the total test variance in the denominator of the reliability equation is the model implied total variance, that is, the summed model implied covariance matrix.
- Cronbach's alpha (for binary items the coefficient equals KR20)
- Guttman's lambda 2
- Split-half coefficient: Correlates the sum scores of two test-halves. By default the variables are split into odd and even numbered items in order of appearance in the variables window. If another split is desired the variables just need to be reordered.
- Average interitem correlation
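A minimal sketch of the omega computation described above, assuming a single-factor model with loadings lambda and residual variances theta (the summed model-implied covariance matrix is then (sum of lambda)^2 + sum of theta):

    omegaCoef <- function(lambda, theta) sum(lambda)^2 / (sum(lambda)^2 + sum(theta))
    omegaCoef(lambda = c(0.7, 0.6, 0.8), theta = c(0.51, 0.64, 0.36))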
4 changes: 2 additions & 2 deletions inst/help/unidimensionalReliabilityFrequentist.md
@@ -12,8 +12,8 @@ The frequentist unidimensional reliability analysis allows the user to test the

### Scale Statistics
- Confidence interval: default is 95%
- Coefficient omega: The same as McDonald's omega (for unidimensional data, based on the single-factor model). Note the total test variance in the denominator of the reliability equation is the model implied total variance, that is, the summed model implied covariance matrix.
- Coefficient alpha: The same as Cronbach's alpha (for binary items the coefficient equals KR20)
- McDonald's omega (for unidimensional data, based on the single-factor model). Note the total test variance in the denominator of the reliability equation is the model implied total variance, that is, the summed model implied covariance matrix.
- Cronbach's alpha (for binary items the coefficient equals KR20)
- Guttman's lambda 2
- Split-half coefficient: Correlates the sum scores of two test-halves. By default the variables are split into odd and even numbered items in order of appearance in the variables window. If another split is desired the variables just need to be reordered. (A sketch follows below.)
- Average interitem correlation
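A minimal sketch of the split-half idea above, assuming an odd/even split by order of appearance (whether an additional Spearman-Brown style step is applied is left open here):

    splithalf <- function(dat) {
      odd  <- rowSums(dat[, seq(1, ncol(dat), by = 2), drop = FALSE])
      even <- rowSums(dat[, seq(2, ncol(dat), by = 2), drop = FALSE])
      r <- cor(odd, even, use = "pairwise.complete.obs")
      c(raw = r, spearmanBrown = 2 * r / (1 + r))
    }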
32 changes: 16 additions & 16 deletions inst/qml/StandardErrorOfMeasurement.qml
@@ -37,7 +37,7 @@ Form
maxLevels: 50
minNumericLevels: 2
id: variables
onCountChanged:
onCountChanged:
{
var newValues = []
for (var i = 2; i <= variables.count; i++)
@@ -81,11 +81,11 @@ Form
name: "feldt"
label: qsTr("Feldt")
id: feldt
DropDown
DropDown
{
name: "feldtNumberOfSplits"
label: qsTr("Number of splits")
values: []
values: [2, 3, 4, 5, 6, 7, 8, 9, 10]
id: feldtNumberOfSplits
}
}
@@ -94,11 +94,11 @@ Form
name: "mollenkopfFeldt"
label: qsTr("Mollenkopf-Feldt")
id: mollenkopfFeldt
DropDown
DropDown
{
name: "mollenkopfFeldtNumberOfSplits"
label: qsTr("Number of splits")
values: []
values: [2, 3, 4, 5, 6, 7, 8, 9, 10]
id: mollenkopfFeldtNumberOfSplits
}
IntegerField
@@ -120,7 +120,7 @@ Form
name: "anova"
label: qsTr("ANOVA")
}

CheckBox
{
name: "irt"
@@ -148,13 +148,13 @@ Form
{
enabled: !variables.columnsTypes.includes("ordinal")
name: "lord2"
label: qsTr("Lord's compound")
label: qsTr("Lord generalized")
id: lord2
DropDown
DropDown
{
name: "lord2NumberOfSplits"
label: qsTr("Number of splits")
values: []
values: [2, 3, 4, 5, 6, 7, 8, 9, 10]
id: lord2NumberOfSplits
}
}
@@ -164,7 +164,7 @@
{
title: qsTr("Options")

CheckBox
CheckBox
{
name: "sumScoreCiTable"
label: qsTr("Sum score table")
@@ -177,7 +177,7 @@
}
}

CheckBox
CheckBox
{
name: "userReliability"
label: qsTr("User defined reliability")
@@ -186,7 +186,7 @@
DoubleField
{
name: "reliabilityValue"
label: ""
label: ""
max: 1
defaultValue: .5
}
@@ -195,17 +195,17 @@
IntegerField
{
name: "minimumGroupSize"
label: qsTr("Minimum number of observations per score group")
label: qsTr("Minimum number of observations per score group")
min: 1
defaultValue: 20
}

CheckBox
CheckBox
{
name: "hideTable"
label: qsTr("Hide SEM table")
}

}

Section
@@ -216,7 +216,7 @@
{
name: "histogramCounts"
label: qsTr("Histogram of sum score counts")
}
}

CheckBox
{