## Setup, Data, Packages
## Resolve the Google Drive data directory for the current machine:
## the Windows path for user "jwile", otherwise the macOS mount point.
loc.data <- if (Sys.info()[["user"]] == "jwile") {
  "g:/My Drive/CBTI+Light project/Recruitment and data collection/"
} else {
  "/Volumes/GoogleDrive/My Drive/CBTI+Light project/Recruitment and data collection/"
}
## graphics packages
library(ggplot2)
library(ggpubr)
library(scales)
## tools and miscellaneous packages
library(JWileymisc)
## data related packages
library(reshape2)
library(data.table)
## modelling packages
library(MplusAutomation)
## ggpubr theme for all plots; Melbourne timezone for all date handling
theme_set(theme_pubr())
Sys.setenv(TZ = "Australia/Melbourne")
## Back-transform scores from a reflected square-root style scale:
## returns 100 - (100 - x)^2, i.e. inverts y = 100 - sqrt(100 - x).
## Vectorized over `x`.
SEback <- function(x) {
  reflected <- 100 - x
  100 - reflected^2
}
d.all <- readRDS(file.path(loc.data, "Data", "SleepWell_Surveys.RDS"))
## create stratification factors
## dcond: 1 = CBT+ arm, 0 = comparison (Relaxation) arm
d.all[, dcond := as.integer(condition == "CBT+")]
## four mutually exclusive randomisation strata crossing baseline insomnia
## severity (ISI <= 7 vs >= 8) with cancer stage (<= 2 vs >= 3); dstrat1
## is the reference stratum (only dstrat2-dstrat4 enter the models below)
d.all[, dstrat1 := as.integer(baseline_isi_high == "ISI <= 7" & baseline_stage_high == "Stage <= 2")]
d.all[, dstrat2 := as.integer(baseline_isi_high == "ISI <= 7" & baseline_stage_high == "Stage >= 3")]
d.all[, dstrat3 := as.integer(baseline_isi_high == "ISI >= 8" & baseline_stage_high == "Stage <= 2")]
d.all[, dstrat4 := as.integer(baseline_isi_high == "ISI >= 8" & baseline_stage_high == "Stage >= 3")]
## sleep data
d.sleep <- readRDS(file.path(loc.data, "Data", "SleepWell_Sleep.RDS"))
## merge survey and sleep data
## full outer join on ID (all=TRUE keeps participants present in only one
## source); only the baseline survey row is joined onto the sleep data
d.all2 <- merge(
d.sleep,
d.all[Survey == "Baseline"],
by = "ID",
all=TRUE)
## weighted.mean(c(1, 4, 2, 5), w = c(.3, .2, .25, .25))
## check that weighting the differences relative to the first by weights matches
## 1 + 3 * .2 + 1 * .25 + 4 * .25
## Mplus MODEL CONSTRAINT template shared by every outcome model below.
## The three %0.4f placeholders are filled via sprintf with the sample
## means of dstrat2-dstrat4, so `adjref` is the model-implied reference
## mean averaged over the stratification factors.
## mtau1-4 / mcbt1-4: adjusted means at the four timepoints (slope coded
## 0 / 0.5 / 1 / 1; slope2 adds the post -> follow-up step 0 / 0 / 0 / 1);
## mdif* = CBT - TAU mean differences; bd* = standardized differences
## using sqrt(intercept variance + residual variance) as the scaler;
## s1cbt / s2cbt: total slopes in the CBT+ arm;
## the rv* inequalities keep the latent variances positive.
mcsetup <- "
NEW(
adjref
mtau1 mtau2 mtau3 mtau4
mcbt1 mcbt2 mcbt3 mcbt4
mdif1 mdif2 mdif3 mdif4
bd1 bd2 bd3 bd4
s1cbt s2cbt);
adjref = rm1 + i1 * %0.4f + i2 * %0.4f + i3 * %0.4f;
mtau1 = adjref + rm2 * 0 + rm3 * 0;
mtau2 = adjref + rm2 * 0.5 + rm3 * 0;
mtau3 = adjref + rm2 * 1 + rm3 * 0;
mtau4 = adjref + rm2 * 1 + rm3 * 1;
mcbt1 = (adjref) + (rm2 + s4) * 0 + (rm3 + s24) * 0;
mcbt2 = (adjref) + (rm2 + s4) * 0.5 + (rm3 + s24) * 0;
mcbt3 = (adjref) + (rm2 + s4) * 1 + (rm3 + s24) * 0;
mcbt4 = (adjref) + (rm2 + s4) * 1 + (rm3 + s24) * 1;
mdif1 = mcbt1 - mtau1;
mdif2 = mcbt2 - mtau2;
mdif3 = mcbt3 - mtau3;
mdif4 = mcbt4 - mtau4;
bd1 = mdif1 / sqrt(rv1 + resvar);
bd2 = mdif2 / sqrt(rv1 + resvar);
bd3 = mdif3 / sqrt(rv1 + resvar);
bd4 = mdif4 / sqrt(rv1 + resvar);
s1cbt = rm2 + s4;
s2cbt = rm3 + s24;
rv1 > .001;
rv2 > .001;
rv3 > .001;
"
## pairwise correlation between sleep disturbance (SD) and insomnia (ISI)
## total scores within each survey wave
na.omit(d.all[, .(Corr = round(cor(SD_Total, ISI_Total, use = "pair"), 2)), by = Survey])
## Survey Corr
## 1: Baseline 0.83
## 2: Midpoint 0.70
## 3: Post 0.81
## 4: Follow-Up 0.84
## RADI2: copy of RADI with "Unknown" responses left as NA
## (presumably radiotherapy status -- confirm against codebook)
d.all[RADI != "Unknown", RADI2 := RADI]
## median weeks since diagnosis
## a few women are metastatic and were diagnosed a long time ago (>100 weeks)
## so better to use median than mean
median(d.all$CANWHEN, na.rm=TRUE)
## [1] 16
## one row per CBT+ participant: proportion of days with adequate bright
## light (BL) use, plain and conservative versions (definitions below)
d.all2[!duplicated(ID) & condition == "CBT+", .(ID, PropLightGood, PropLightGoodCons)]
## ID PropLightGood PropLightGoodCons
## 1: 1 0.9761905 0.97619048
## 2: 2 0.6904762 0.69047619
## 3: 3 0.8571429 0.14285714
## 4: 4 0.7142857 0.35714286
## 5: 6 0.7142857 0.11904762
## 6: 8 0.8809524 0.88095238
## 7: 9 0.0000000 0.00000000
## 8: 10 0.9761905 0.97619048
## 9: 12 0.7142857 0.71428571
## 10: 20 0.5476190 0.54761905
## 11: 24 0.2619048 0.26190476
## 12: 25 0.7857143 0.78571429
## 13: 27 0.8333333 0.83333333
## 14: 32 0.7619048 0.76190476
## 15: 33 0.4285714 0.07142857
## 16: 34 0.5357143 0.35714286
## 17: 35 0.6428571 0.64285714
## 18: 36 0.5476190 0.54761905
## 19: 37 0.8095238 0.80952381
## 20: 43 0.5476190 0.54761905
## 21: 44 0.7380952 0.73809524
## 22: 45 NA NA
## 23: 49 0.7857143 0.78571429
## 24: 50 0.7142857 0.71428571
## 25: 52 0.6428571 0.64285714
## 26: 54 0.7857143 0.78571429
## 27: 56 NA NA
## 28: 57 0.8095238 0.80952381
## 29: 58 0.7857143 0.78571429
## 30: 59 0.5952381 0.59523810
## 31: 65 NA NA
## 32: 66 0.6666667 0.09523810
## 33: 70 0.7380952 0.73809524
## 34: 73 0.8571429 0.85714286
## 35: 75 0.6500000 0.30952381
## 36: 77 NA NA
## 37: 78 0.4761905 0.47619048
## 38: 79 0.6666667 0.66666667
## 39: 84 0.9761905 0.97619048
## 40: 87 0.8571429 0.85714286
## 41: 88 0.5476190 0.54761905
## 42: 89 0.3095238 0.30952381
## 43: 90 0.8095238 0.80952381
## 44: 92 0.1428571 0.14285714
## 45: 94 0.6428571 0.64285714
## 46: 96 NA NA
## 47: 97 0.5714286 0.19047619
## 48: 100 NA NA
## ID PropLightGood PropLightGoodCons
## P = average days per week of those reported BL used >= 20 min
## PCons = average days per week out of 42 total days BL used >= 20 min, dropping out after 1 day counts as 41 days of non wear
## PLG = average days per week of those reported BL used >= 20 min AND -1 to +4 hours of habitual rise time
## PLGCons = average days per week out of 42 total days BL used >= 20 min AND -1 to +4 hours of habitual rise time, dropping out after 1 day counts as 41 days of non wear, not returning a week of sleep diary counts as 7 days non wear, etc.
## adherence summary for the CBT+ arm, converted to days per week (* 7);
## the ID column's mean is meaningless and should be ignored
colMeans(na.omit(d.all2[condition == "CBT+", .(P = mean(GTime >= 20, na.rm=TRUE),
PCons = sum(GTime >= 20, na.rm=TRUE) / 42,
PLG = PropLightGood[1],
PLGCons = PropLightGoodCons[1]), by = ID])) * 7
## ID P PCons PLG PLGCons
## 337.536585 6.281105 4.475610 4.779675 4.182927
## Alpha and Omega Internal Consistency Reliability
## ISI
## Cronbach's alpha for the 7 ISI items at each survey wave
d.all[Survey != "Screening", .(alphaISI = psych::alpha(.SD)$total$raw_alpha),
.SDcols = paste0("ISI_", 1:7), by = Survey]
## Survey alphaISI
## 1: Baseline 0.8724876
## 2: Midpoint 0.8738744
## 3: Post 0.8948870
## 4: Follow-Up 0.9232795
## McDonald's omega total (ML factoring) for the ISI items at each wave
d.all[Survey != "Screening", .(omegaISI = psych::omega(.SD, fm = "ml", plot = FALSE)$omega.tot),
.SDcols = paste0("ISI_", 1:7), by = Survey]
## Survey omegaISI
## 1: Baseline 0.9278312
## 2: Midpoint 0.9176502
## 3: Post 0.9302033
## 4: Follow-Up 0.9573001
## fatigue
## Cronbach's alpha for the 8 fatigue items at each survey wave
d.all[Survey != "Screening", .(alphaFA = psych::alpha(.SD)$total$raw_alpha),
.SDcols = paste0("FA_", 1:8), by = Survey]
## Survey alphaFA
## 1: Baseline 0.9503524
## 2: Midpoint 0.9611975
## 3: Post 0.9599523
## 4: Follow-Up 0.9670744
## McDonald's omega total for the fatigue items at each wave
d.all[Survey != "Screening", .(omegaFA = psych::omega(.SD, fm = "ml", plot = FALSE)$omega.tot),
.SDcols = paste0("FA_", 1:8), by = Survey]
## Survey omegaFA
## 1: Baseline 0.9715286
## 2: Midpoint 0.9738202
## 3: Post 0.9748024
## 4: Follow-Up 0.9789168
## sleep disturbance
## Cronbach's alpha; check.keys = TRUE auto-reverses negatively keyed items
d.all[Survey != "Screening", .(alphaSD = psych::alpha(.SD, check.keys = TRUE)$total$raw_alpha),
.SDcols = paste0("SD_", 1:8), by = Survey]
## Survey alphaSD
## 1: Baseline 0.9216345
## 2: Midpoint 0.8774621
## 3: Post 0.9248093
## 4: Follow-Up 0.9137247
## McDonald's omega total for the sleep disturbance items at each wave
d.all[Survey != "Screening", .(omegaSD = psych::omega(.SD, fm = "ml", plot = FALSE)$omega.tot),
.SDcols = paste0("SD_", 1:8), by = Survey]
## Survey omegaSD
## 1: Baseline 0.9549915
## 2: Midpoint 0.9397980
## 3: Post 0.9547637
## 4: Follow-Up 0.9565814
## sleep related impairment
## Cronbach's alpha; check.keys = TRUE auto-reverses negatively keyed items
d.all[Survey != "Screening", .(alphaSRI = psych::alpha(.SD, check.keys = TRUE)$total$raw_alpha),
.SDcols = paste0("SRI_", 1:8), by = Survey]
## Survey alphaSRI
## 1: Baseline 0.8946007
## 2: Midpoint 0.9129292
## 3: Post 0.9109589
## 4: Follow-Up 0.9144879
## McDonald's omega total for the sleep related impairment items at each wave
d.all[Survey != "Screening", .(omegaSRI = psych::omega(.SD, fm = "ml", plot = FALSE)$omega.tot),
.SDcols = paste0("SRI_", 1:8), by = Survey]
## Survey omegaSRI
## 1: Baseline 0.9282076
## 2: Midpoint 0.9466446
## 3: Post 0.9476426
## 4: Follow-Up 0.9519870
## depression symptoms
## Cronbach's alpha for the 8 depression items at each survey wave
d.all[Survey != "Screening", .(alphaDEP = psych::alpha(.SD)$total$raw_alpha),
.SDcols = paste0("DEP_", 1:8), by = Survey]
## Survey alphaDEP
## 1: Baseline 0.9456960
## 2: Midpoint 0.9532679
## 3: Post 0.9623747
## 4: Follow-Up 0.9595301
## McDonald's omega total for the depression items at each wave.
## Fixed column label: this was mislabelled `omegaFA` (copy-paste from the
## fatigue block above) even though it summarises the DEP_ items.
d.all[Survey != "Screening", .(omegaDEP = psych::omega(.SD, fm = "ml", plot = FALSE)$omega.tot),
.SDcols = paste0("DEP_", 1:8), by = Survey]
## Survey omegaFA
## 1: Baseline 0.9657872
## 2: Midpoint 0.9662748
## 3: Post 0.9788668
## 4: Follow-Up 0.9734325
## anxiety symptoms
## Cronbach's alpha for the 8 anxiety items at each survey wave
d.all[Survey != "Screening", .(alphaANX = psych::alpha(.SD)$total$raw_alpha),
.SDcols = paste0("ANX_", 1:8), by = Survey]
## Survey alphaANX
## 1: Baseline 0.9403866
## 2: Midpoint 0.9394364
## 3: Post 0.9657214
## 4: Follow-Up 0.9407899
## McDonald's omega total for the anxiety items at each wave
d.all[Survey != "Screening", .(omegaANX = psych::omega(.SD, fm = "ml", plot = FALSE)$omega.tot),
.SDcols = paste0("ANX_", 1:8), by = Survey]
## Survey omegaANX
## 1: Baseline 0.9591951
## 2: Midpoint 0.9599043
## 3: Post 0.9763747
## 4: Follow-Up 0.9637339
## Demographics Table
## demographics & medical / treatment variables at baseline
## descriptives and tests by condition; [, c(1, 3, 2, 4)] reorders the
## result columns so the CBT+ column prints before Relaxation
egltable(c(
"Age", "MARI2", "CHILDLV18", "ETH", "ETH2", "INCOM3", "EDU3", "EMPL3",
"MENOPAUSE", "CANSTG", "SURG", "RADIATION", "HORMONAL", "CAN1"),
g = "condition",
data = droplevels(d.all[Survey == "Baseline"]),
parametric = TRUE)[, c(1, 3, 2, 4)]
## Warning in chisq.test(tabs, correct = FALSE, simulate.p.value = simChisq, : Chi-
## squared approximation may be incorrect
## CBT+ M (SD)/N (%) Relaxation M (SD)/N (%)
## 1: Age 48.52 (9.51) 51.19 (12.02)
## 2: MARI2
## 3: Married/living as 33 (70.2) 26 (57.8)
## 4: Other 14 (29.8) 19 (42.2)
## 5: CHILDLV18
## 6: Child < 18y 18 (38.3) 10 (22.2)
## 7: No Child < 18y 29 (61.7) 35 (77.8)
## 8: ETH
## 9: White 41 (87.2) 40 (88.9)
## 10: Indigenous 0 (0.0) 1 (2.2)
## 11: Asian 2 (4.3) 2 (4.4)
## 12: Indian 0 (0.0) 2 (4.4)
## 13: Other 4 (8.5) 0 (0.0)
## 14: ETH2
## 15: Non-White 6 (12.8) 5 (11.1)
## 16: White 41 (87.2) 40 (88.9)
## 17: INCOM3
## 18: <= $50,000 7 (16.3) 10 (24.4)
## 19: $50,001 - $100,000 15 (34.9) 20 (48.8)
## 20: > $100,000 21 (48.8) 11 (26.8)
## 21: EDU3
## 22: < Bachelor 14 (29.8) 18 (40.0)
## 23: Bachelor 17 (36.2) 12 (26.7)
## 24: > Bachelor 16 (34.0) 15 (33.3)
## 25: EMPL3
## 26: Employed 32 (68.1) 29 (64.4)
## 27: Retired 6 (12.8) 9 (20.0)
## 28: Not employed 9 (19.1) 7 (15.6)
## 29: MENOPAUSE
## 30: Current 11 (26.2) 10 (22.7)
## 31: No 20 (47.6) 16 (36.4)
## 32: Past 11 (26.2) 18 (40.9)
## 33: CANSTG
## 34: 1 5 (11.1) 9 (20.5)
## 35: 2 16 (35.6) 14 (31.8)
## 36: 3 12 (26.7) 10 (22.7)
## 37: 4 12 (26.7) 11 (25.0)
## 38: SURG
## 39: No 12 (26.7) 14 (31.8)
## 40: Yes 33 (73.3) 30 (68.2)
## 41: RADIATION
## 42: Current 5 (11.4) 5 (11.4)
## 43: No 6 (13.6) 12 (27.3)
## 44: Planned 26 (59.1) 21 (47.7)
## 45: UnknownPlan 7 (15.9) 6 (13.6)
## 46: HORMONAL
## 47: Current 6 (14.0) 8 (18.6)
## 48: No 11 (25.6) 13 (30.2)
## 49: Planned 12 (27.9) 11 (25.6)
## 50: UnknownPlan 14 (32.6) 11 (25.6)
## 51: CAN1
## 52: No 9 (20.0) 7 (15.9)
## 53: Yes 36 (80.0) 37 (84.1)
## CBT+ M (SD)/N (%) Relaxation M (SD)/N (%)
## Test
## 1: t(df=89) = 1.18, p = .241, d = 0.25
## 2: Chi-square = 1.55, df = 1, p = .214, Phi = 0.13
## 3:
## 4:
## 5: Chi-square = 2.81, df = 1, p = .094, Phi = 0.17
## 6:
## 7:
## 8: Chi-square = 6.97, df = 4, p = .137, Cramer's V = 0.28
## 9:
## 10:
## 11:
## 12:
## 13:
## 14: Chi-square = 0.06, df = 1, p = .807, Phi = 0.03
## 15:
## 16:
## 17: Chi-square = 4.32, df = 2, p = .115, Cramer's V = 0.23
## 18:
## 19:
## 20:
## 21: Chi-square = 1.35, df = 2, p = .509, Cramer's V = 0.12
## 22:
## 23:
## 24:
## 25: Chi-square = 0.95, df = 2, p = .620, Cramer's V = 0.10
## 26:
## 27:
## 28:
## 29: Chi-square = 2.14, df = 2, p = .344, Cramer's V = 0.16
## 30:
## 31:
## 32:
## 33: Chi-square = 1.49, df = 3, p = .684, Cramer's V = 0.13
## 34:
## 35:
## 36:
## 37:
## 38: Chi-square = 0.29, df = 1, p = .593, Phi = 0.06
## 39:
## 40:
## 41: Chi-square = 2.61, df = 3, p = .456, Cramer's V = 0.17
## 42:
## 43:
## 44:
## 45:
## 46: Chi-square = 0.86, df = 3, p = .836, Cramer's V = 0.10
## 47:
## 48:
## 49:
## 50:
## 51: Chi-square = 0.25, df = 1, p = .615, Phi = 0.05
## 52:
## 53:
## Test
## mh and sleep tx at baseline
## mental-health and sleep treatment use by condition;
## columns reordered so CBT+ prints before Relaxation
egltable(c(
"GRP", "PSY", "MED", "PSYSL", "MEDSL", "HERSL"),
g = "condition",
data = droplevels(d.all[Survey == "Baseline"]),
parametric = TRUE)[, c(1, 3, 2, 4)]
## Warning in chisq.test(tabs, correct = FALSE, simulate.p.value = simChisq, : Chi-
## squared approximation may be incorrect
## CBT+ N (%) Relaxation N (%)
## 1: GRP
## 2: No 29 (64.4) 34 (79.1)
## 3: Yes 16 (35.6) 9 (20.9)
## 4: PSY
## 5: No 27 (61.4) 31 (70.5)
## 6: Yes 17 (38.6) 13 (29.5)
## 7: MED
## 8: No 35 (79.5) 34 (79.1)
## 9: Yes 9 (20.5) 9 (20.9)
## 10: PSYSL
## 11: No 39 (88.6) 41 (93.2)
## 12: Yes 5 (11.4) 3 (6.8)
## 13: MEDSL
## 14: No 28 (62.2) 28 (63.6)
## 15: Yes 17 (37.8) 16 (36.4)
## 16: HERSL
## 17: No 35 (77.8) 36 (81.8)
## 18: Yes 10 (22.2) 8 (18.2)
## Test
## 1: Chi-square = 2.31, df = 1, p = .128, Phi = 0.16
## 2:
## 3:
## 4: Chi-square = 0.81, df = 1, p = .368, Phi = 0.10
## 5:
## 6:
## 7: Chi-square = 0.00, df = 1, p = .956, Phi = 0.01
## 8:
## 9:
## 10: Chi-square = 0.55, df = 1, p = .458, Phi = 0.08
## 11:
## 12:
## 13: Chi-square = 0.02, df = 1, p = .890, Phi = 0.01
## 14:
## 15:
## 16: Chi-square = 0.23, df = 1, p = .635, Phi = 0.05
## 17:
## 18:
## PROs at baseline
## patient-reported outcomes by condition (default column order:
## Relaxation then CBT+)
egltable(c(
"ISI_Total", "SRI_TScore", "SD_TScore", "FA_TScore", "DEP_TScore", "ANX_TScore"),
g = "condition",
data = droplevels(d.all[Survey == "Baseline"]),
parametric = TRUE)
## Relaxation M (SD) CBT+ M (SD)
## 1: ISI_Total 12.86 (5.69) 13.17 (5.61)
## 2: SRI_TScore 56.86 (7.71) 56.72 (7.93)
## 3: SD_TScore 54.74 (8.12) 55.27 (9.36)
## 4: FA_TScore 59.09 (7.77) 57.37 (7.51)
## 5: DEP_TScore 51.89 (9.92) 50.09 (8.67)
## 6: ANX_TScore 51.84 (10.14) 52.83 (9.10)
## Test
## 1: t(df=90) = -0.26, p = .798, d = 0.05
## 2: t(df=89) = 0.09, p = .931, d = 0.02
## 3: t(df=89) = -0.29, p = .772, d = 0.06
## 4: t(df=89) = 1.07, p = .286, d = 0.22
## 5: t(df=89) = 0.93, p = .357, d = 0.19
## 6: t(df=89) = -0.49, p = .627, d = 0.10
## PROs at baseline not by group for MIDs
## whole-sample baseline descriptives (presumably used for minimally
## important difference calculations -- confirm)
egltable(c(
"ISI_Total", "SRI_TScore", "SD_TScore", "FA_TScore", "DEP_TScore", "ANX_TScore"),
data = droplevels(d.all[Survey == "Baseline"]),
parametric = TRUE)
## M (SD)
## 1: ISI_Total 13.02 (5.62)
## 2: SRI_TScore 56.79 (7.78)
## 3: SD_TScore 55.01 (8.74)
## 4: FA_TScore 58.21 (7.65)
## 5: DEP_TScore 50.96 (9.29)
## 6: ANX_TScore 52.35 (9.57)
## credibility and expectancy
## CEQ treatment credibility and expectancy ratings by condition
egltable(c("CEQ_Cred", "CEQ_Exp"),
g = "condition",
data = droplevels(d.all[Survey == "Baseline"]),
parametric = TRUE)
## Relaxation M (SD) CBT+ M (SD) Test
## 1: CEQ_Cred 5.47 (1.60) 6.04 (1.47) t(df=87) = -1.76, p = .082, d = 0.37
## 2: CEQ_Exp -0.06 (1.07) 0.05 (0.86) t(df=86) = -0.52, p = .606, d = 0.11
## Insomnia Symptoms
## reshape ISI totals from long to wide: one column per survey wave
isidat <- copy(reshape(d.all[, .(
ID, dcond, dstrat2, dstrat3, dstrat4, ISI_Total, Survey)],
v.names = "ISI_Total",
timevar = "Survey",
idvar = c("ID", "dcond", "dstrat2", "dstrat3", "dstrat4"),
direction = "wide"))
## positional rename: ISI0 presumably the screening wave, ISI1-ISI4 =
## baseline, midpoint, post, follow-up
## NOTE(review): relies on reshape() column order -- confirm if the
## Survey factor levels ever change
setnames(isidat, names(isidat),
c("id", "dcond", "dstrat2", "dstrat3", "dstrat4",
"ISI0", "ISI1", "ISI2", "ISI3", "ISI4"))
## keep participants with at least one post-screening ISI measurement
isidat <- isidat[!is.na(ISI1) | !is.na(ISI2) | !is.na(ISI3) | !is.na(ISI4)]
## piecewise latent growth model for ISI, estimated in Mplus:
## intercept + slope (baseline -> post, coded 0 / 0.5 / 1 / 1) + slope2
## (post -> follow-up step, coded 0 / 0 / 0 / 1); stratification dummies
## adjust the intercept (dcond fixed to 0 there), dcond (CBT+) predicts
## both slopes; MODELCONSTRAINT fills the shared mcsetup template with
## this sample's stratum means.
isifit <- mplusModeler(mplusObject(
TITLE = "Primary ISI Analysis;",
ANALYSIS = "ESTIMATOR = ML;",
MODEL = "
[ISI1 - ISI4@0]; ! fix residual intercepts to 0
ISI1* ISI2* ISI3* ISI4* (resvar); ! homogenous residual variance
int BY ISI1@1 ISI2@1 ISI3@1 ISI4@1;
slope BY ISI1@0 ISI2@0.5 ISI3@1 ISI4@1;
slope2 BY ISI1@0 ISI2@0 ISI3@0 ISI4@1;
int ON dstrat2 dstrat3 dstrat4 dcond@0 (i1 - i4);
slope ON dcond (s4);
slope2 ON dcond (s24);
[int* slope* slope2*] (rm1 - rm3);
int* slope* slope2*0 (rv1-rv3);
int WITH slope*0;
slope2 WITH int*0 slope*0;
",
OUTPUT = "STDYX; CINTERVAL;",
MODELCONSTRAINT = sprintf(
mcsetup,
mean(isidat$dstrat2),
mean(isidat$dstrat3),
mean(isidat$dstrat4)),
usevariables = c("ISI1", "ISI2", "ISI3", "ISI4",
"dstrat2", "dstrat3", "dstrat4", "dcond"),
rdata = as.data.frame(isidat)),
dataout = "01_isi.dat", run = TRUE, writeData = "always", hashfilename = FALSE)
## Wrote model to: 01_isi.inp
## Wrote data to: 01_isi.dat
## The file(s)
## '01_isi.dat'
## currently exist(s) and will be overwritten
## model information, coefficients, and effect sizes
## fit summary: chi-square, CFI/TLI, SRMR, RMSEA, AIC/BIC
summary(isifit)
## Primary ISI Analysis
##
## Estimated using ML
## Number of obs: 92, number of (free) parameters: 15
##
## Model: Chi2(df = 15) = 27.001, p = 0.0287
## Baseline model: Chi2(df = 22) = 172.026, p = 0
##
## Fit Indices:
##
## CFI = 0.92, TLI = 0.883, SRMR = 0.114
## RMSEA = 0.093, 90% CI [0.03, 0.149], p < .05 = 0.106
## AIC = 1694.543, BIC = 1732.37
## regression, intercept, and MODEL CONSTRAINT ("new") estimates with
## 95% CIs; [,-1] drops confint's Label column, duplicated from coef()
cbind(
coef(isifit, param = c("reg", "exp", "new")),
confint(isifit, param = c("reg", "exp", "new"))[,-1])
## Label est se pval LowerCI UpperCI
## 13 INT<-DSTRAT2 0.119 1.728 0.945 -3.269 3.506
## 14 INT<-DSTRAT3 6.600 1.267 0.000 4.116 9.084
## 15 INT<-DSTRAT4 5.439 1.259 0.000 2.972 7.907
## 16 INT<-DCOND 0.000 0.000 999.000 0.000 0.000
## 17 SLOPE<-DCOND -3.131 1.194 0.009 -5.471 -0.790
## 18 SLOPE2<-DCOND 2.316 1.479 0.117 -0.582 5.214
## 22 ISI1<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 23 ISI2<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 24 ISI3<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 25 ISI4<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 26 INT<-Intercepts 8.504 1.040 0.000 6.465 10.543
## 27 SLOPE<-Intercepts -1.925 0.889 0.030 -3.667 -0.183
## 28 SLOPE2<-Intercepts -2.523 1.113 0.023 -4.705 -0.342
## 36 ADJREF 12.906 0.464 0.000 11.996 13.816
## 37 MTAU1 12.906 0.464 0.000 11.996 13.816
## 38 MTAU2 11.944 0.560 0.000 10.846 13.041
## 39 MTAU3 10.981 0.898 0.000 9.221 12.741
## 40 MTAU4 8.458 1.093 0.000 6.315 10.600
## 41 MCBT1 12.906 0.464 0.000 11.996 13.816
## 42 MCBT2 10.378 0.544 0.000 9.312 11.445
## 43 MCBT3 7.851 0.862 0.000 6.161 9.540
## 44 MCBT4 7.643 0.948 0.000 5.784 9.502
## 45 MDIF1 0.000 0.000 1.000 0.000 0.000
## 46 MDIF2 -1.565 0.597 0.009 -2.735 -0.395
## 47 MDIF3 -3.131 1.194 0.009 -5.471 -0.790
## 48 MDIF4 -0.815 1.412 0.564 -3.583 1.954
## 49 BD1 0.000 0.000 1.000 0.000 0.000
## 50 BD2 -0.343 0.133 0.010 -0.604 -0.082
## 51 BD3 -0.686 0.267 0.010 -1.208 -0.163
## 52 BD4 -0.178 0.310 0.565 -0.786 0.429
## 53 S1CBT -5.056 0.856 0.000 -6.733 -3.378
## 54 S2CBT -0.207 0.973 0.831 -2.115 1.700
## create all the data for the plot, including means, effect sizes, Ns
## confidence intervals and significance
isi.plotdat <- list()
isi.plotdat <- within(isi.plotdat, {
## rows 37:44 of the coefficient table are MTAU1-MTAU4 then MCBT1-MCBT4
## NOTE(review): positional -- re-check if the Mplus model changes
est <- as.data.table(cbind(coef(isifit), confint(isifit)[, -1]))[37:44, ]
est[, Condition := rep(c("TAU+", "CBT-I+Light"), each = 4)]
est[, Timepoint := c(0:3, 0:3)]
## observed Ns with non-missing ISI per condition and survey wave
freq <- xtabs(~ condition + Survey, data = d.all[!is.na(ISI_Total)])
## effect size labels from the BD* estimates, leading zero stripped;
## [-1] drops baseline where the difference is fixed at 0
ES <- c(sprintf("ES = %s",
gsub("^0", "",
format(round(abs(subset(coef(isifit, param = c("new")),
grepl("BD", Label))$est), 2),
digits = 2, nsmall = 2))))[-1]
## x-axis labels carrying per-wave Ns for each arm
labels <- c(
sprintf("Baseline\nCBT-I+Light N: %d \nTAU+ N: %d ",
freq["CBT+", "Baseline"],
freq["Relaxation", "Baseline"]),
sprintf("Midpoint\n%d\n%d",
freq["CBT+", "Midpoint"],
freq["Relaxation", "Midpoint"]),
sprintf("Post\n%d\n%d",
freq["CBT+", "Post"],
freq["Relaxation", "Post"]),
sprintf("Follow-up\n%d\n%d",
freq["CBT+", "Follow-Up"],
freq["Relaxation", "Follow-Up"]))
## significance symbols for the MDIF* contrasts; [-1] drops baseline
sig <- symnum(subset(coef(isifit), grepl("MDIF", Label))$pval,
legend = FALSE, na = "",
cutpoints = c(0, 0.001, 0.01, 0.05, 1),
symbols = c("***", "**", "*", "n.s."))[-1]
## y positions just above / below the CI range for the annotations
yval.sig <- with(est, max(UpperCI) + (abs(diff(range(c(LowerCI, UpperCI)))) * .02))
yval.es <- with(est, min(LowerCI) - (abs(diff(range(c(LowerCI, UpperCI)))) * .03))
})
## make the plot
## timepoints jittered +/- .03 so the two arms' point ranges do not
## overlap; dotted line at 8 marks the ISI >= 8 stratification cutoff
p.isi <- with(isi.plotdat,
ggplot(est, aes(Timepoint + ifelse(Condition=="CBT-I+Light", .03, -.03),
est, ymin = LowerCI, ymax = UpperCI,
colour = Condition, shape = Condition)) +
geom_hline(yintercept = 8, linetype = 3, colour = "grey50") +
geom_line(size = .1, alpha = .5) + geom_pointrange(size=1) +
scale_x_continuous("", breaks = 0:3, labels = labels) +
scale_y_continuous("Insomnia Symptoms (Adjusted)",
breaks = seq(5, 18, 1)) +
annotate("text", x = 1, y = yval.sig, label = sig[1]) +
annotate("text", x = 2, y = yval.sig, label = sig[2]) +
annotate("text", x = 3, y = yval.sig, label = sig[3]) +
annotate("text", x = 1, y = yval.es, label = ES[1]) +
annotate("text", x = 2, y = yval.es, label = ES[2]) +
annotate("text", x = 3, y = yval.es, label = ES[3]))
## apply the "jco" palette via ggpubr
p.isi <- set_palette(p.isi, "jco")
print(p.isi)
## ## make plot for graphical abstract
## p.graphab <- with(isi.plotdat,
## ggplot(est, aes(Timepoint + ifelse(Condition=="CBT+", .03, -.03),
## est, ymin = LowerCI, ymax = UpperCI,
## colour = Condition, shape = Condition)) +
## geom_hline(yintercept = 8, linetype = 3, colour = "grey50") +
## geom_line(size = .06) + geom_pointrange(size=1) +
## scale_x_continuous("", breaks = c(0, 1, 2, 2.95),
## labels = c("Baseline", "Midpoint", "Post", "Follow-up ")) +
## scale_y_continuous("Insomnia Symptoms (Adjusted)",
## breaks = seq(5, 18, 1)) +
## annotate("text", x = 1, y = yval.sig, label = sig[1]) +
## annotate("text", x = 2, y = yval.sig, label = sig[2]) +
## annotate("text", x = 3, y = yval.sig, label = sig[3]) +
## annotate("text", x = 1, y = yval.es, label = ES[1]) +
## annotate("text", x = 2, y = yval.es, label = ES[2]) +
## annotate("text", x = 2.9, y = yval.es, label = ES[3]) +
## theme(legend.position = "none"))
## p.graphab <- set_palette(p.graphab, "jco")
## win.metafile(
## filename = "isi_forgraphicalabstract.emf",
## width = 4, height = 3.3)
## print(p.graphab)
## dev.off()
## proportional change in ISI from baseline (ISI1) to post (ISI3) among
## completers, sorted from most improved to most worsened
isidatCHANGE <- isidat[!is.na(ISI3), .(delta = (ISI3 / ISI1) - 1, dcond, id)][order(delta)]
isidatCHANGE[, dcond := factor(dcond, levels = 1:0, labels = c("CBT-I+Light", "TAU+"))]
## simple completers t-test on percent change by condition
t.test(delta ~ dcond, data = isidatCHANGE, var.equal=TRUE)
##
## Two Sample t-test
##
## data: delta by dcond
## t = -2.0887, df = 67, p-value = 0.04054
## alternative hypothesis: true difference in means between group CBT-I+Light and group TAU+ is not equal to 0
## 95 percent confidence interval:
## -0.67580182 -0.01533921
## sample estimates:
## mean in group CBT-I+Light mean in group TAU+
## -0.337669917 0.007900599
## proportion change < 0 = TRUE in CBT+ vs TAU+ conditions
## (any improvement; column proportions within condition)
prop.table(xtabs(~I(delta < 0) + dcond, data = isidatCHANGE), 2)
## dcond
## I(delta < 0) CBT-I+Light TAU+
## FALSE 0.1142857 0.3235294
## TRUE 0.8857143 0.6764706
## proportion -50%+ change = TRUE in CBT+ vs TAU+ conditions
prop.table(xtabs(~I(delta <= -.5) + dcond, data = isidatCHANGE), 2)
## dcond
## I(delta <= -0.5) CBT-I+Light TAU+
## FALSE 0.5142857 0.8529412
## TRUE 0.4857143 0.1470588
## proportion worsened > 0 change = TRUE in CBT+ vs TAU+ conditions
prop.table(xtabs(~I(delta > 0) + dcond, data = isidatCHANGE), 2)
## dcond
## I(delta > 0) CBT-I+Light TAU+
## FALSE 0.91428571 0.73529412
## TRUE 0.08571429 0.26470588
## ID for median change in TAU+ group
## (middle row of the change-sorted data within each arm)
isidatCHANGE[dcond=="TAU+"][round(.N / 2), id]
## [1] 53
## ID for median change in CBT+ group
isidatCHANGE[dcond=="CBT-I+Light"][round(.N / 2), id]
## [1] 12
## waterfall plot of per-patient percent change; the arrows mark the
## median-change patient in each arm (IDs 53 and 12 are hard-coded from
## the median-ID queries above -- NOTE(review): update if data change)
p.change.isi <- ggplot(isidatCHANGE, aes(factor(id, levels = id), delta, fill = dcond)) +
geom_bar(stat = "identity") +
## set factor(number) to the ID for TAU+ and then CBT+
geom_segment(aes(x = factor(53), xend = factor(53), y = -.7, yend = -.25),
arrow = arrow(length = unit(0.2,"cm"))) +
geom_segment(aes(x = factor(12), xend = factor(12), y = -.7, yend = -.55),
arrow = arrow(length = unit(0.2,"cm"))) +
scale_x_discrete(
"Individual Patients") +
scale_y_continuous(
"Percent Change from Baseline to Post", labels = percent) +
theme(axis.text.x = element_blank(), legend.title = element_blank(),
legend.position = c(.15, .9))
p.change.isi <- set_palette(p.change.isi, "jco") +
coord_cartesian(ylim = c(-.95, 1.5)) +
ggtitle("Insomnia Symptoms")
print(p.change.isi)
## save PDF of graph
pdf(file = "figure_3.pdf", width = 6, height = 5)
print(p.change.isi)
dev.off()
## png
## 2
## greyscale TIFF version of figure 3 for print (same plot rebuilt with
## the "grey" palette; LZW compression with predictor)
tiff(file = "figure_3_greyscale.tif", width = 6, height = 5, units = "in", res = 300, compression = "lzw+p")
set_palette(ggplot(isidatCHANGE, aes(factor(id, levels = id), delta, fill = dcond)) +
geom_bar(stat = "identity") +
## set factor(number) to the ID for TAU+ and then CBT+
geom_segment(aes(x = factor(53), xend = factor(53), y = -.7, yend = -.25),
arrow = arrow(length = unit(0.2,"cm"))) +
geom_segment(aes(x = factor(12), xend = factor(12), y = -.7, yend = -.55),
arrow = arrow(length = unit(0.2,"cm"))) +
scale_x_discrete(
"Individual Patients") +
scale_y_continuous(
"Percent Change from Baseline to Post", labels = percent) +
theme(axis.text.x = element_blank(), legend.title = element_blank(),
legend.position = c(.15, .9)), "grey") +
coord_cartesian(ylim = c(-.95, 1.5)) +
ggtitle("Insomnia Symptoms")
dev.off()
## png
## 2
## Sleep Related Impairment
## reshape SRI T-scores from long to wide: one column per survey wave
sridat <- copy(reshape(d.all[, .(
ID, dcond, dstrat2, dstrat3, dstrat4, SRI_TScore, Survey)],
v.names = "SRI_TScore",
timevar = "Survey",
idvar = c("ID", "dcond", "dstrat2", "dstrat3", "dstrat4"),
direction = "wide"))
## positional rename: SRI0 presumably the screening wave, SRI1-SRI4 =
## baseline, midpoint, post, follow-up
## NOTE(review): relies on reshape() column order
setnames(sridat, names(sridat),
c("id", "dcond", "dstrat2", "dstrat3", "dstrat4",
"SRI0", "SRI1", "SRI2", "SRI3", "SRI4"))
## keep participants with at least one post-screening SRI measurement
sridat <- sridat[!is.na(SRI1) | !is.na(SRI2) | !is.na(SRI3) | !is.na(SRI4)]
## piecewise latent growth model for SRI T-scores: intercept + slope
## (baseline -> post, 0 / 0.5 / 1 / 1) + slope2 (post -> follow-up,
## 0 / 0 / 0 / 1); stratification dummies on the intercept, dcond on the
## slopes; MODELCONSTRAINT fills the shared mcsetup template with this
## sample's stratum means.
srifit <- mplusModeler(mplusObject(
TITLE = "Primary SRI Analysis;",
ANALYSIS = "ESTIMATOR = ML;",
MODEL = "
[SRI1 - SRI4@0]; ! fix residual intercepts to 0
SRI1* SRI2* SRI3* SRI4* (resvar); ! homogenous residual variance
int BY SRI1@1 SRI2@1 SRI3@1 SRI4@1;
slope BY SRI1@0 SRI2@0.5 SRI3@1 SRI4@1;
slope2 BY SRI1@0 SRI2@0 SRI3@0 SRI4@1;
int ON dstrat2 dstrat3 dstrat4 dcond@0 (i1 - i4);
slope ON dcond (s4);
slope2 ON dcond (s24);
[int* slope* slope2*] (rm1 - rm3);
int* slope* slope2*0 (rv1-rv3);
int WITH slope*0;
slope2 WITH int*0 slope*0;
",
OUTPUT = "STDYX; CINTERVAL;",
MODELCONSTRAINT = sprintf(
mcsetup,
mean(sridat$dstrat2),
mean(sridat$dstrat3),
mean(sridat$dstrat4)),
usevariables = c("SRI1", "SRI2", "SRI3", "SRI4",
"dstrat2", "dstrat3", "dstrat4", "dcond"),
rdata = as.data.frame(sridat)),
dataout = "01_sri.dat", run = TRUE, writeData = "always", hashfilename = FALSE)
## Wrote model to: 01_sri.inp
## Wrote data to: 01_sri.dat
## The file(s)
## '01_sri.dat'
## currently exist(s) and will be overwritten
## model information, coefficients, and effect sizes
## fit summary: chi-square, CFI/TLI, SRMR, RMSEA, AIC/BIC
summary(srifit)
## Primary SRI Analysis
##
## Estimated using ML
## Number of obs: 92, number of (free) parameters: 15
##
## Model: Chi2(df = 15) = 19.525, p = 0.1909
## Baseline model: Chi2(df = 22) = 151.977, p = 0
##
## Fit Indices:
##
## CFI = 0.965, TLI = 0.949, SRMR = 0.089
## RMSEA = 0.057, 90% CI [0, 0.121], p < .05 = 0.391
## AIC = 1941.407, BIC = 1979.234
## regression, intercept, and MODEL CONSTRAINT ("new") estimates with
## 95% CIs; [,-1] drops confint's Label column, duplicated from coef()
cbind(
coef(srifit, param = c("reg", "exp", "new")),
confint(srifit, param = c("reg", "exp", "new"))[,-1])
## Label est se pval LowerCI UpperCI
## 13 INT<-DSTRAT2 0.837 2.811 0.766 -4.672 6.345
## 14 INT<-DSTRAT3 6.875 1.976 0.001 3.003 10.747
## 15 INT<-DSTRAT4 6.895 1.979 0.000 3.017 10.774
## 16 INT<-DCOND 0.000 0.000 999.000 0.000 0.000
## 17 SLOPE<-DCOND -4.364 1.669 0.009 -7.635 -1.092
## 18 SLOPE2<-DCOND 1.072 2.099 0.609 -3.041 5.186
## 22 SRI1<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 23 SRI2<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 24 SRI3<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 25 SRI4<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 26 INT<-Intercepts 51.439 1.639 0.000 48.227 54.652
## 27 SLOPE<-Intercepts -1.595 1.242 0.199 -4.029 0.839
## 28 SLOPE2<-Intercepts -3.008 1.576 0.056 -6.097 0.081
## 36 ADJREF 56.536 0.726 0.000 55.112 57.959
## 37 MTAU1 56.536 0.726 0.000 55.112 57.959
## 38 MTAU2 55.738 0.838 0.000 54.095 57.382
## 39 MTAU3 54.941 1.284 0.000 52.423 57.458
## 40 MTAU4 51.932 1.655 0.000 48.689 55.176
## 41 MCBT1 56.536 0.726 0.000 55.112 57.959
## 42 MCBT2 53.556 0.816 0.000 51.957 55.155
## 43 MCBT3 50.577 1.229 0.000 48.167 52.987
## 44 MCBT4 48.641 1.471 0.000 45.757 51.525
## 45 MDIF1 0.000 0.000 1.000 0.000 0.000
## 46 MDIF2 -2.182 0.835 0.009 -3.818 -0.546
## 47 MDIF3 -4.364 1.669 0.009 -7.635 -1.092
## 48 MDIF4 -3.292 2.097 0.116 -7.401 0.818
## 49 BD1 0.000 0.000 1.000 0.000 0.000
## 50 BD2 -0.303 0.118 0.010 -0.534 -0.072
## 51 BD3 -0.606 0.236 0.010 -1.068 -0.143
## 52 BD4 -0.457 0.293 0.119 -1.031 0.117
## 53 S1CBT -5.959 1.190 0.000 -8.291 -3.627
## 54 S2CBT -1.936 1.385 0.162 -4.651 0.779
## create all the data for the plot, including means, effect sizes, Ns
## confidence intervals and significance
sri.plotdat <- list()
sri.plotdat <- within(sri.plotdat, {
## rows 37:44 of the coefficient table are MTAU1-MTAU4 then MCBT1-MCBT4
est <- as.data.table(cbind(coef(srifit), confint(srifit)[, -1]))[37:44, ]
est[, Condition := rep(c("TAU+", "CBT-I+Light"), each = 4)]
est[, Timepoint := c(0:3, 0:3)]
## NOTE(review): Ns are counted on SRI_Total although the model uses
## SRI_TScore -- confirm the two have identical missingness
freq <- xtabs(~ condition + Survey, data = d.all[!is.na(SRI_Total)])
## effect size labels from BD* estimates; [-1] drops baseline (fixed 0)
ES <- c(sprintf("ES = %s",
gsub("^0", "",
format(round(abs(subset(coef(srifit, param = c("new")),
grepl("BD", Label))$est), 2),
digits = 2, nsmall = 2))))[-1]
## x-axis labels carrying per-wave Ns for each arm
labels <- c(
sprintf("Baseline\nCBT-I+Light N: %d \nTAU+ N: %d ",
freq["CBT+", "Baseline"],
freq["Relaxation", "Baseline"]),
sprintf("Midpoint\n%d\n%d",
freq["CBT+", "Midpoint"],
freq["Relaxation", "Midpoint"]),
sprintf("Post\n%d\n%d",
freq["CBT+", "Post"],
freq["Relaxation", "Post"]),
sprintf("Follow-up\n%d\n%d",
freq["CBT+", "Follow-Up"],
freq["Relaxation", "Follow-Up"]))
## significance symbols for the MDIF* contrasts; [-1] drops baseline
sig <- symnum(subset(coef(srifit), grepl("MDIF", Label))$pval,
legend = FALSE, na = "",
cutpoints = c(0, 0.001, 0.01, 0.05, 1),
symbols = c("***", "**", "*", "n.s."))[-1]
## y positions just above / below the CI range for the annotations
yval.sig <- with(est, max(UpperCI) + (abs(diff(range(c(LowerCI, UpperCI)))) * .02))
yval.es <- with(est, min(LowerCI) - (abs(diff(range(c(LowerCI, UpperCI)))) * .03))
})
## make the plot
## timepoints jittered +/- .03 so the arms do not overlap; dotted
## reference line at a T-score of 50 (nominal population mean)
p.sri <- with(sri.plotdat,
ggplot(est, aes(Timepoint + ifelse(Condition=="CBT-I+Light", .03, -.03),
est, ymin = LowerCI, ymax = UpperCI,
colour = Condition, shape = Condition)) +
geom_hline(yintercept = 50, linetype = 3, colour = "grey50") +
geom_line(size = .1, alpha = .5) + geom_pointrange(size=1) +
scale_x_continuous("", breaks = 0:3, labels = labels) +
scale_y_continuous("Sleep Related Impairment (Adjusted)",
breaks = seq(40, 60, 2)) +
annotate("text", x = 1, y = yval.sig, label = sig[1]) +
annotate("text", x = 2, y = yval.sig, label = sig[2]) +
annotate("text", x = 3, y = yval.sig, label = sig[3]) +
annotate("text", x = 1, y = yval.es, label = ES[1]) +
annotate("text", x = 2, y = yval.es, label = ES[2]) +
annotate("text", x = 3, y = yval.es, label = ES[3]))
p.sri <- set_palette(p.sri, "jco")
print(p.sri)
## Sleep Disturbance Symptoms
## reshape sleep disturbance T-scores from long to wide: one column per wave
sddat <- copy(reshape(d.all[, .(
ID, dcond, dstrat2, dstrat3, dstrat4, SD_TScore, Survey)],
v.names = "SD_TScore",
timevar = "Survey",
idvar = c("ID", "dcond", "dstrat2", "dstrat3", "dstrat4"),
direction = "wide"))
## positional rename: SD0 presumably the screening wave, SD1-SD4 =
## baseline, midpoint, post, follow-up
## NOTE(review): relies on reshape() column order
setnames(sddat, names(sddat),
c("id", "dcond", "dstrat2", "dstrat3", "dstrat4",
"SD0", "SD1", "SD2", "SD3", "SD4"))
## keep participants with at least one post-screening SD measurement
sddat <- sddat[!is.na(SD1) | !is.na(SD2) | !is.na(SD3) | !is.na(SD4)]
## piecewise latent growth model for sleep disturbance T-scores:
## intercept + slope (baseline -> post, 0 / 0.5 / 1 / 1) + slope2
## (post -> follow-up, 0 / 0 / 0 / 1); stratification dummies on the
## intercept, dcond on the slopes; MODELCONSTRAINT fills the shared
## mcsetup template with this sample's stratum means.
sdfit <- mplusModeler(mplusObject(
TITLE = "Primary SD Analysis;",
ANALYSIS = "ESTIMATOR = ML;",
MODEL = "
[SD1 - SD4@0]; ! fix residual intercepts to 0
SD1* SD2* SD3* SD4* (resvar); ! homogenous residual variance
int BY SD1@1 SD2@1 SD3@1 SD4@1;
slope BY SD1@0 SD2@0.5 SD3@1 SD4@1;
slope2 BY SD1@0 SD2@0 SD3@0 SD4@1;
int ON dstrat2 dstrat3 dstrat4 dcond@0 (i1 - i4);
slope ON dcond (s4);
slope2 ON dcond (s24);
[int* slope* slope2*] (rm1 - rm3);
int* slope* slope2*0 (rv1-rv3);
int WITH slope*0;
slope2 WITH int*0 slope*0;
",
OUTPUT = "STDYX; CINTERVAL;",
MODELCONSTRAINT = sprintf(
mcsetup,
mean(sddat$dstrat2),
mean(sddat$dstrat3),
mean(sddat$dstrat4)),
usevariables = c("SD1", "SD2", "SD3", "SD4",
"dstrat2", "dstrat3", "dstrat4", "dcond"),
rdata = as.data.frame(sddat)),
dataout = "01_sd.dat", run = TRUE, writeData = "always", hashfilename = FALSE)
## Wrote model to: 01_sd.inp
## Wrote data to: 01_sd.dat
## The file(s)
## '01_sd.dat'
## currently exist(s) and will be overwritten
## model information, coefficients, and effect sizes
## prints N, free parameters, chi-square, CFI/TLI/SRMR/RMSEA, AIC/BIC
summary(sdfit)
## Primary SD Analysis
##
## Estimated using ML
## Number of obs: 92, number of (free) parameters: 15
##
## Model: Chi2(df = 15) = 31.335, p = 0.0079
## Baseline model: Chi2(df = 22) = 140.064, p = 0
##
## Fit Indices:
##
## CFI = 0.862, TLI = 0.797, SRMR = 0.215
## RMSEA = 0.109, 90% CI [0.054, 0.162], p < .05 = 0.041
## AIC = 1954.468, BIC = 1992.295
## regression, expectation, and constraint ("new") parameters with 95% CIs;
## [,-1] drops confint's duplicated Label column before binding
cbind(
coef(sdfit, param = c("reg", "exp", "new")),
confint(sdfit, param = c("reg", "exp", "new"))[,-1])
## Label est se pval LowerCI UpperCI
## 13 INT<-DSTRAT2 1.000 2.690 0.710 -4.272 6.272
## 14 INT<-DSTRAT3 8.657 1.995 0.000 4.747 12.567
## 15 INT<-DSTRAT4 7.799 1.984 0.000 3.911 11.688
## 16 INT<-DCOND 0.000 0.000 999.000 0.000 0.000
## 17 SLOPE<-DCOND -3.013 1.751 0.085 -6.445 0.420
## 18 SLOPE2<-DCOND 2.763 2.426 0.255 -1.991 7.518
## 22 SD1<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 23 SD2<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 24 SD3<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 25 SD4<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 26 INT<-Intercepts 48.651 1.637 0.000 45.442 51.859
## 27 SLOPE<-Intercepts -2.524 1.309 0.054 -5.090 0.042
## 28 SLOPE2<-Intercepts -2.907 1.818 0.110 -6.470 0.656
## 36 ADJREF 54.746 0.733 0.000 53.310 56.182
## 37 MTAU1 54.746 0.733 0.000 53.310 56.182
## 38 MTAU2 53.484 0.850 0.000 51.817 55.150
## 39 MTAU3 52.222 1.329 0.000 49.617 54.827
## 40 MTAU4 49.315 1.668 0.000 46.045 52.585
## 41 MCBT1 54.746 0.733 0.000 53.310 56.182
## 42 MCBT2 51.977 0.826 0.000 50.359 53.596
## 43 MCBT3 49.209 1.273 0.000 46.714 51.704
## 44 MCBT4 49.066 1.458 0.000 46.208 51.924
## 45 MDIF1 0.000 0.000 1.000 0.000 0.000
## 46 MDIF2 -1.506 0.876 0.085 -3.223 0.210
## 47 MDIF3 -3.013 1.751 0.085 -6.445 0.420
## 48 MDIF4 -0.249 2.148 0.908 -4.459 3.960
## 49 BD1 0.000 0.000 1.000 0.000 0.000
## 50 BD2 -0.208 0.122 0.088 -0.448 0.031
## 51 BD3 -0.417 0.245 0.088 -0.896 0.062
## 52 BD4 -0.034 0.297 0.908 -0.617 0.548
## 53 S1CBT -5.537 1.260 0.000 -8.006 -3.068
## 54 S2CBT -0.144 1.612 0.929 -3.303 3.016
## create all the data for the plot, including means, effect sizes, Ns
## confidence intervals and significance
sd.plotdat <- list()
sd.plotdat <- within(sd.plotdat, {
## full parameter table with 95% CIs; keep the model-implied adjusted means
## (MTAU1-4 then MCBT1-4) by label rather than by fragile row position
## (previously hard-coded [37:44, ], which breaks if the table changes)
est <- as.data.table(cbind(coef(sdfit), confint(sdfit)[, -1]))[
Label %in% paste0(rep(c("MTAU", "MCBT"), each = 4), 1:4), ]
## table order is MTAU1-4 followed by MCBT1-4, so labels align row-wise
est[, Condition := rep(c("TAU+", "CBT-I+Light"), each = 4)]
est[, Timepoint := c(0:3, 0:3)]
## per-wave Ns by condition, based on non-missing raw SD totals
freq <- xtabs(~ condition + Survey, data = d.all[!is.na(SD_Total)])
## effect-size labels from the BD* parameters; [-1] drops baseline (always 0)
## and gsub strips the leading zero (e.g. "ES = .42")
ES <- c(sprintf("ES = %s",
gsub("^0", "",
format(round(abs(subset(coef(sdfit, param = c("new")),
grepl("BD", Label))$est), 2),
digits = 2, nsmall = 2))))[-1]
## x-axis labels with per-condition Ns beneath each wave name
labels <- c(
sprintf("Baseline\nCBT-I+Light N: %d \nTAU+ N: %d ",
freq["CBT+", "Baseline"],
freq["Relaxation", "Baseline"]),
sprintf("Midpoint\n%d\n%d",
freq["CBT+", "Midpoint"],
freq["Relaxation", "Midpoint"]),
sprintf("Post\n%d\n%d",
freq["CBT+", "Post"],
freq["Relaxation", "Post"]),
sprintf("Follow-up\n%d\n%d",
freq["CBT+", "Follow-Up"],
freq["Relaxation", "Follow-Up"]))
## significance stars for the condition differences (MDIF*); [-1] drops baseline
sig <- symnum(subset(coef(sdfit), grepl("MDIF", Label))$pval,
legend = FALSE, na = "",
cutpoints = c(0, 0.001, 0.01, 0.05, 1),
symbols = c("***", "**", "*", "n.s."))[-1]
## annotation y positions just outside the plotted CI range
yval.sig <- with(est, max(UpperCI) + (abs(diff(range(c(LowerCI, UpperCI)))) * .02))
yval.es <- with(est, min(LowerCI) - (abs(diff(range(c(LowerCI, UpperCI)))) * .03))
})
## assemble the adjusted-means trajectory plot for Sleep Disturbance
p.sd <- with(sd.plotdat, {
base <- ggplot(est, aes(Timepoint + ifelse(Condition == "CBT-I+Light", .03, -.03),
est, ymin = LowerCI, ymax = UpperCI,
colour = Condition, shape = Condition)) +
geom_hline(yintercept = 50, linetype = 3, colour = "grey50") +
geom_line(size = .1, alpha = .5) +
geom_pointrange(size = 1) +
scale_x_continuous("", breaks = 0:3, labels = labels) +
scale_y_continuous("Sleep Disturbance (Adjusted)",
breaks = seq(40, 58, 2))
## significance stars above and effect sizes below the CIs at the
## three post-baseline timepoints (x = 1, 2, 3)
base +
lapply(1:3, function(i) annotate("text", x = i, y = yval.sig, label = sig[i])) +
lapply(1:3, function(i) annotate("text", x = i, y = yval.es, label = ES[i]))
})
p.sd <- set_palette(p.sd, "jco")
print(p.sd)
Fatigue Symptoms
## reshape Fatigue T-scores to wide format (one column per survey)
fadat <- copy(reshape(
d.all[, .(ID, dcond, dstrat2, dstrat3, dstrat4, FA_TScore, Survey)],
direction = "wide",
idvar = c("ID", "dcond", "dstrat2", "dstrat3", "dstrat4"),
timevar = "Survey",
v.names = "FA_TScore"))
## NOTE(review): positional rename — assumes reshape() emits the survey-wave
## columns in FA0..FA4 order; verify against levels(d.all$Survey)
setnames(fadat, names(fadat),
c("id", "dcond", "dstrat2", "dstrat3", "dstrat4",
"FA0", "FA1", "FA2", "FA3", "FA4"))
## drop cases missing all four modelled waves (FA1-FA4)
fadat <- fadat[!(is.na(FA1) & is.na(FA2) & is.na(FA3) & is.na(FA4))]
## Latent growth model for Fatigue T-scores fitted via Mplus:
## random intercept plus two piecewise slopes (loadings 0/0.5/1/1 for
## baseline->post and 0/0/0/1 for post->follow-up), adjusted for the
## stratification factors; dcond effect on the intercept is fixed at 0.
fafit <- mplusModeler(mplusObject(
TITLE = "Primary FA Analysis;",
ANALYSIS = "ESTIMATOR = ML;",
## Mplus model syntax (comments inside the string use Mplus's "!" marker)
MODEL = "
[FA1 - FA4@0]; ! fix residual intercepts to 0
FA1* FA2* FA3* FA4* (resvar); ! homogenous residual variance
int BY FA1@1 FA2@1 FA3@1 FA4@1;
slope BY FA1@0 FA2@0.5 FA3@1 FA4@1;
slope2 BY FA1@0 FA2@0 FA3@0 FA4@1;
int ON dstrat2 dstrat3 dstrat4 dcond@0 (i1 - i4);
slope ON dcond (s4);
slope2 ON dcond (s24);
[int* slope* slope2*] (rm1 - rm3);
int* slope* slope2*0 (rv1-rv3);
int WITH slope*0;
slope2 WITH int*0 slope*0;
",
## request standardized estimates and confidence intervals
OUTPUT = "STDYX; CINTERVAL;",
## fill the shared MODEL CONSTRAINT template (mcsetup) with this sample's
## stratification-factor proportions to compute sample-adjusted means
MODELCONSTRAINT = sprintf(
mcsetup,
mean(fadat$dstrat2),
mean(fadat$dstrat3),
mean(fadat$dstrat4)),
usevariables = c("FA1", "FA2", "FA3", "FA4",
"dstrat2", "dstrat3", "dstrat4", "dcond"),
rdata = as.data.frame(fadat)),
## always rewrite the data file without hashing so the Mplus input is stable
dataout = "01_fa.dat", run = TRUE, writeData = "always", hashfilename = FALSE)
## Wrote model to: 01_fa.inp
## Wrote data to: 01_fa.dat
## The file(s)
## '01_fa.dat'
## currently exist(s) and will be overwritten
## model information, coefficients, and effect sizes
## prints N, free parameters, chi-square, CFI/TLI/SRMR/RMSEA, AIC/BIC
summary(fafit)
## Primary FA Analysis
##
## Estimated using ML
## Number of obs: 92, number of (free) parameters: 15
##
## Model: Chi2(df = 15) = 26.482, p = 0.0333
## Baseline model: Chi2(df = 22) = 155.76, p = 0
##
## Fit Indices:
##
## CFI = 0.914, TLI = 0.874, SRMR = 0.139
## RMSEA = 0.091, 90% CI [0.026, 0.147], p < .05 = 0.118
## AIC = 1946.766, BIC = 1984.593
## regression, expectation, and constraint ("new") parameters with 95% CIs;
## [,-1] drops confint's duplicated Label column before binding
cbind(
coef(fafit, param = c("reg", "exp", "new")),
confint(fafit, param = c("reg", "exp", "new"))[,-1])
## Label est se pval LowerCI UpperCI
## 13 INT<-DSTRAT2 2.574 2.635 0.329 -2.591 7.739
## 14 INT<-DSTRAT3 9.590 1.871 0.000 5.924 13.257
## 15 INT<-DSTRAT4 8.658 1.917 0.000 4.901 12.416
## 16 INT<-DCOND 0.000 0.000 999.000 0.000 0.000
## 17 SLOPE<-DCOND -4.170 1.682 0.013 -7.467 -0.872
## 18 SLOPE2<-DCOND 0.871 2.244 0.698 -3.529 5.270
## 22 FA1<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 23 FA2<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 24 FA3<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 25 FA4<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 26 INT<-Intercepts 51.335 1.573 0.000 48.252 54.419
## 27 SLOPE<-Intercepts -0.494 1.235 0.689 -2.914 1.926
## 28 SLOPE2<-Intercepts -3.372 1.722 0.050 -6.747 0.002
## 36 ADJREF 58.238 0.711 0.000 56.844 59.631
## 37 MTAU1 58.238 0.711 0.000 56.844 59.631
## 38 MTAU2 57.990 0.861 0.000 56.302 59.679
## 39 MTAU3 57.743 1.319 0.000 55.157 60.330
## 40 MTAU4 54.371 1.788 0.000 50.867 57.875
## 41 MCBT1 58.238 0.711 0.000 56.844 59.631
## 42 MCBT2 55.906 0.839 0.000 54.262 57.549
## 43 MCBT3 53.574 1.264 0.000 51.096 56.052
## 44 MCBT4 51.072 1.586 0.000 47.963 54.181
## 45 MDIF1 0.000 0.000 1.000 0.000 0.000
## 46 MDIF2 -2.085 0.841 0.013 -3.734 -0.436
## 47 MDIF3 -4.170 1.682 0.013 -7.467 -0.872
## 48 MDIF4 -3.299 2.375 0.165 -7.954 1.356
## 49 BD1 0.000 0.000 1.000 0.000 0.000
## 50 BD2 -0.296 0.121 0.015 -0.533 -0.058
## 51 BD3 -0.591 0.243 0.015 -1.067 -0.116
## 52 BD4 -0.468 0.339 0.167 -1.132 0.196
## 53 S1CBT -4.664 1.181 0.000 -6.979 -2.349
## 54 S2CBT -2.502 1.522 0.100 -5.485 0.482
## create all the data for the plot, including means, effect sizes, Ns
## confidence intervals and significance
fa.plotdat <- list()
fa.plotdat <- within(fa.plotdat, {
## full parameter table with 95% CIs; keep the model-implied adjusted means
## (MTAU1-4 then MCBT1-4) by label rather than by fragile row position
## (previously hard-coded [37:44, ], which breaks if the table changes)
est <- as.data.table(cbind(coef(fafit), confint(fafit)[, -1]))[
Label %in% paste0(rep(c("MTAU", "MCBT"), each = 4), 1:4), ]
## table order is MTAU1-4 followed by MCBT1-4, so labels align row-wise
est[, Condition := rep(c("TAU+", "CBT-I+Light"), each = 4)]
est[, Timepoint := c(0:3, 0:3)]
## per-wave Ns by condition, based on non-missing raw FA totals
freq <- xtabs(~ condition + Survey, data = d.all[!is.na(FA_Total)])
## effect-size labels from the BD* parameters; [-1] drops baseline (always 0)
## and gsub strips the leading zero (e.g. "ES = .59")
ES <- c(sprintf("ES = %s",
gsub("^0", "",
format(round(abs(subset(coef(fafit, param = c("new")),
grepl("BD", Label))$est), 2),
digits = 2, nsmall = 2))))[-1]
## x-axis labels with per-condition Ns beneath each wave name
labels <- c(
sprintf("Baseline\nCBT-I+Light N: %d \nTAU+ N: %d ",
freq["CBT+", "Baseline"],
freq["Relaxation", "Baseline"]),
sprintf("Midpoint\n%d\n%d",
freq["CBT+", "Midpoint"],
freq["Relaxation", "Midpoint"]),
sprintf("Post\n%d\n%d",
freq["CBT+", "Post"],
freq["Relaxation", "Post"]),
sprintf("Follow-up\n%d\n%d",
freq["CBT+", "Follow-Up"],
freq["Relaxation", "Follow-Up"]))
## significance stars for the condition differences (MDIF*); [-1] drops baseline
sig <- symnum(subset(coef(fafit), grepl("MDIF", Label))$pval,
legend = FALSE, na = "",
cutpoints = c(0, 0.001, 0.01, 0.05, 1),
symbols = c("***", "**", "*", "n.s."))[-1]
## annotation y positions just outside the plotted CI range
yval.sig <- with(est, max(UpperCI) + (abs(diff(range(c(LowerCI, UpperCI)))) * .02))
yval.es <- with(est, min(LowerCI) - (abs(diff(range(c(LowerCI, UpperCI)))) * .03))
})
## assemble the adjusted-means trajectory plot for Fatigue
p.fa <- with(fa.plotdat, {
base <- ggplot(est, aes(Timepoint + ifelse(Condition == "CBT-I+Light", .03, -.03),
est, ymin = LowerCI, ymax = UpperCI,
colour = Condition, shape = Condition)) +
geom_hline(yintercept = 50, linetype = 3, colour = "grey50") +
geom_line(size = .1, alpha = .5) +
geom_pointrange(size = 1) +
scale_x_continuous("", breaks = 0:3, labels = labels) +
scale_y_continuous("Fatigue Symptoms (Adjusted)",
breaks = seq(46, 62, 2))
## significance stars above and effect sizes below the CIs at the
## three post-baseline timepoints (x = 1, 2, 3)
base +
lapply(1:3, function(i) annotate("text", x = i, y = yval.sig, label = sig[i])) +
lapply(1:3, function(i) annotate("text", x = i, y = yval.es, label = ES[i]))
})
p.fa <- set_palette(p.fa, "jco")
print(p.fa)
Depression Symptoms
## reshape Depression T-scores to wide format (one column per survey)
depdat <- copy(reshape(
d.all[, .(ID, dcond, dstrat2, dstrat3, dstrat4, DEP_TScore, Survey)],
direction = "wide",
idvar = c("ID", "dcond", "dstrat2", "dstrat3", "dstrat4"),
timevar = "Survey",
v.names = "DEP_TScore"))
## NOTE(review): positional rename — assumes reshape() emits the survey-wave
## columns in DEP0..DEP4 order; verify against levels(d.all$Survey)
setnames(depdat, names(depdat),
c("id", "dcond", "dstrat2", "dstrat3", "dstrat4",
"DEP0", "DEP1", "DEP2", "DEP3", "DEP4"))
## drop cases missing all four modelled waves (DEP1-DEP4)
depdat <- depdat[!(is.na(DEP1) & is.na(DEP2) & is.na(DEP3) & is.na(DEP4))]
## Latent growth model for Depression T-scores fitted via Mplus:
## random intercept plus two piecewise slopes (loadings 0/0.5/1/1 for
## baseline->post and 0/0/0/1 for post->follow-up), adjusted for the
## stratification factors; dcond effect on the intercept is fixed at 0.
depfit <- mplusModeler(mplusObject(
TITLE = "Primary DEP Analysis;",
ANALYSIS = "ESTIMATOR = ML;",
## Mplus model syntax (comments inside the string use Mplus's "!" marker)
MODEL = "
[DEP1 - DEP4@0]; ! fix residual intercepts to 0
DEP1* DEP2* DEP3* DEP4* (resvar); ! homogenous residual variance
int BY DEP1@1 DEP2@1 DEP3@1 DEP4@1;
slope BY DEP1@0 DEP2@0.5 DEP3@1 DEP4@1;
slope2 BY DEP1@0 DEP2@0 DEP3@0 DEP4@1;
int ON dstrat2 dstrat3 dstrat4 dcond@0 (i1 - i4);
slope ON dcond (s4);
slope2 ON dcond (s24);
[int* slope* slope2*] (rm1 - rm3);
int* slope* slope2*0 (rv1-rv3);
int WITH slope*0;
slope2 WITH int*0 slope*0;
",
## request standardized estimates and confidence intervals
OUTPUT = "STDYX; CINTERVAL;",
## fill the shared MODEL CONSTRAINT template (mcsetup) with this sample's
## stratification-factor proportions to compute sample-adjusted means
MODELCONSTRAINT = sprintf(
mcsetup,
mean(depdat$dstrat2),
mean(depdat$dstrat3),
mean(depdat$dstrat4)),
usevariables = c("DEP1", "DEP2", "DEP3", "DEP4",
"dstrat2", "dstrat3", "dstrat4", "dcond"),
rdata = as.data.frame(depdat)),
## always rewrite the data file without hashing so the Mplus input is stable
dataout = "01_dep.dat", run = TRUE, writeData = "always", hashfilename = FALSE)
## Wrote model to: 01_dep.inp
## Wrote data to: 01_dep.dat
## The file(s)
## '01_dep.dat'
## currently exist(s) and will be overwritten
## model information, coefficients, and effect sizes
## prints N, free parameters, chi-square, CFI/TLI/SRMR/RMSEA, AIC/BIC
summary(depfit)
## Primary DEP Analysis
##
## Estimated using ML
## Number of obs: 92, number of (free) parameters: 15
##
## Model: Chi2(df = 15) = 14.945, p = 0.4554
## Baseline model: Chi2(df = 22) = 185.28, p = 0
##
## Fit Indices:
##
## CFI = 1, TLI = 1, SRMR = 0.071
## RMSEA = 0, 90% CI [0, 0.098], p < .05 = 0.672
## AIC = 1938.353, BIC = 1976.18
## regression, expectation, and constraint ("new") parameters with 95% CIs;
## [,-1] drops confint's duplicated Label column before binding
cbind(
coef(depfit, param = c("reg", "exp", "new")),
confint(depfit, param = c("reg", "exp", "new"))[,-1])
## Label est se pval LowerCI UpperCI
## 13 INT<-DSTRAT2 2.429 3.360 0.470 -4.155 9.014
## 14 INT<-DSTRAT3 7.970 2.393 0.001 3.280 12.660
## 15 INT<-DSTRAT4 5.350 2.397 0.026 0.652 10.048
## 16 INT<-DCOND 0.000 0.000 999.000 0.000 0.000
## 17 SLOPE<-DCOND 1.599 1.620 0.324 -1.576 4.774
## 18 SLOPE2<-DCOND -2.008 1.825 0.271 -5.586 1.570
## 22 DEP1<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 23 DEP2<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 24 DEP3<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 25 DEP4<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 26 INT<-Intercepts 45.898 1.993 0.000 41.991 49.806
## 27 SLOPE<-Intercepts -2.469 1.213 0.042 -4.848 -0.091
## 28 SLOPE2<-Intercepts -0.104 1.363 0.939 -2.775 2.568
## 36 ADJREF 51.000 0.897 0.000 49.243 52.758
## 37 MTAU1 51.000 0.897 0.000 49.243 52.758
## 38 MTAU2 49.766 0.945 0.000 47.913 51.618
## 39 MTAU3 48.531 1.311 0.000 45.962 51.100
## 40 MTAU4 48.427 1.422 0.000 45.641 51.214
## 41 MCBT1 51.000 0.897 0.000 49.243 52.758
## 42 MCBT2 50.565 0.925 0.000 48.752 52.378
## 43 MCBT3 50.130 1.257 0.000 47.666 52.595
## 44 MCBT4 48.018 1.283 0.000 45.504 50.533
## 45 MDIF1 0.000 0.000 1.000 0.000 0.000
## 46 MDIF2 0.800 0.810 0.324 -0.788 2.387
## 47 MDIF3 1.599 1.620 0.324 -1.576 4.774
## 48 MDIF4 -0.409 1.690 0.809 -3.722 2.904
## 49 BD1 0.000 0.000 1.000 0.000 0.000
## 50 BD2 0.091 0.093 0.324 -0.090 0.273
## 51 BD3 0.183 0.185 0.324 -0.181 0.546
## 52 BD4 -0.047 0.193 0.809 -0.425 0.332
## 53 S1CBT -0.870 1.162 0.454 -3.147 1.406
## 54 S2CBT -2.112 1.212 0.082 -4.488 0.264
## create all the data for the plot, including means, effect sizes, Ns
## confidence intervals and significance
dep.plotdat <- list()
dep.plotdat <- within(dep.plotdat, {
## full parameter table with 95% CIs; keep the model-implied adjusted means
## (MTAU1-4 then MCBT1-4) by label rather than by fragile row position
## (previously hard-coded [37:44, ], which breaks if the table changes)
est <- as.data.table(cbind(coef(depfit), confint(depfit)[, -1]))[
Label %in% paste0(rep(c("MTAU", "MCBT"), each = 4), 1:4), ]
## table order is MTAU1-4 followed by MCBT1-4, so labels align row-wise
est[, Condition := rep(c("TAU+", "CBT-I+Light"), each = 4)]
est[, Timepoint := c(0:3, 0:3)]
## per-wave Ns by condition, based on non-missing raw DEP totals
freq <- xtabs(~ condition + Survey, data = d.all[!is.na(DEP_Total)])
## effect-size labels from the BD* parameters; [-1] drops baseline (always 0)
## and gsub strips the leading zero (e.g. "ES = .18")
ES <- c(sprintf("ES = %s",
gsub("^0", "",
format(round(abs(subset(coef(depfit, param = c("new")),
grepl("BD", Label))$est), 2),
digits = 2, nsmall = 2))))[-1]
## x-axis labels with per-condition Ns beneath each wave name
labels <- c(
sprintf("Baseline\nCBT-I+Light N: %d \nTAU+ N: %d ",
freq["CBT+", "Baseline"],
freq["Relaxation", "Baseline"]),
sprintf("Midpoint\n%d\n%d",
freq["CBT+", "Midpoint"],
freq["Relaxation", "Midpoint"]),
sprintf("Post\n%d\n%d",
freq["CBT+", "Post"],
freq["Relaxation", "Post"]),
sprintf("Follow-up\n%d\n%d",
freq["CBT+", "Follow-Up"],
freq["Relaxation", "Follow-Up"]))
## significance stars for the condition differences (MDIF*); [-1] drops baseline
sig <- symnum(subset(coef(depfit), grepl("MDIF", Label))$pval,
legend = FALSE, na = "",
cutpoints = c(0, 0.001, 0.01, 0.05, 1),
symbols = c("***", "**", "*", "n.s."))[-1]
## annotation y positions just outside the plotted CI range
yval.sig <- with(est, max(UpperCI) + (abs(diff(range(c(LowerCI, UpperCI)))) * .02))
yval.es <- with(est, min(LowerCI) - (abs(diff(range(c(LowerCI, UpperCI)))) * .03))
})
## assemble the adjusted-means trajectory plot for Depressive Symptoms
p.dep <- with(dep.plotdat, {
base <- ggplot(est, aes(Timepoint + ifelse(Condition == "CBT-I+Light", .03, -.03),
est, ymin = LowerCI, ymax = UpperCI,
colour = Condition, shape = Condition)) +
geom_hline(yintercept = 50, linetype = 3, colour = "grey50") +
geom_line(size = .1, alpha = .5) +
geom_pointrange(size = 1) +
scale_x_continuous("", breaks = 0:3, labels = labels) +
scale_y_continuous("Depressive Symptoms (Adjusted)",
breaks = seq(44, 54, 2))
## significance stars above and effect sizes below the CIs at the
## three post-baseline timepoints (x = 1, 2, 3)
base +
lapply(1:3, function(i) annotate("text", x = i, y = yval.sig, label = sig[i])) +
lapply(1:3, function(i) annotate("text", x = i, y = yval.es, label = ES[i]))
})
p.dep <- set_palette(p.dep, "jco")
print(p.dep)
Anxiety Symptoms
## reshape Anxiety T-scores to wide format (one column per survey)
anxdat <- copy(reshape(
d.all[, .(ID, dcond, dstrat2, dstrat3, dstrat4, ANX_TScore, Survey)],
direction = "wide",
idvar = c("ID", "dcond", "dstrat2", "dstrat3", "dstrat4"),
timevar = "Survey",
v.names = "ANX_TScore"))
## NOTE(review): positional rename — assumes reshape() emits the survey-wave
## columns in ANX0..ANX4 order; verify against levels(d.all$Survey)
setnames(anxdat, names(anxdat),
c("id", "dcond", "dstrat2", "dstrat3", "dstrat4",
"ANX0", "ANX1", "ANX2", "ANX3", "ANX4"))
## drop cases missing all four modelled waves (ANX1-ANX4)
anxdat <- anxdat[!(is.na(ANX1) & is.na(ANX2) & is.na(ANX3) & is.na(ANX4))]
## Latent growth model for Anxiety T-scores fitted via Mplus:
## random intercept plus two piecewise slopes (loadings 0/0.5/1/1 for
## baseline->post and 0/0/0/1 for post->follow-up), adjusted for the
## stratification factors; dcond effect on the intercept is fixed at 0.
anxfit <- mplusModeler(mplusObject(
TITLE = "Primary ANX Analysis;",
ANALYSIS = "ESTIMATOR = ML;",
## Mplus model syntax (comments inside the string use Mplus's "!" marker)
MODEL = "
[ANX1 - ANX4@0]; ! fix residual intercepts to 0
ANX1* ANX2* ANX3* ANX4* (resvar); ! homogenous residual variance
int BY ANX1@1 ANX2@1 ANX3@1 ANX4@1;
slope BY ANX1@0 ANX2@0.5 ANX3@1 ANX4@1;
slope2 BY ANX1@0 ANX2@0 ANX3@0 ANX4@1;
int ON dstrat2 dstrat3 dstrat4 dcond@0 (i1 - i4);
slope ON dcond (s4);
slope2 ON dcond (s24);
[int* slope* slope2*] (rm1 - rm3);
int* slope* slope2*0 (rv1-rv3);
int WITH slope*0;
slope2 WITH int*0 slope*0;
",
## request standardized estimates and confidence intervals
OUTPUT = "STDYX; CINTERVAL;",
## fill the shared MODEL CONSTRAINT template (mcsetup) with this sample's
## stratification-factor proportions to compute sample-adjusted means
MODELCONSTRAINT = sprintf(
mcsetup,
mean(anxdat$dstrat2),
mean(anxdat$dstrat3),
mean(anxdat$dstrat4)),
usevariables = c("ANX1", "ANX2", "ANX3", "ANX4",
"dstrat2", "dstrat3", "dstrat4", "dcond"),
rdata = as.data.frame(anxdat)),
## always rewrite the data file without hashing so the Mplus input is stable
dataout = "01_anx.dat", run = TRUE, writeData = "always", hashfilename = FALSE)
## Wrote model to: 01_anx.inp
## Wrote data to: 01_anx.dat
## The file(s)
## '01_anx.dat'
## currently exist(s) and will be overwritten
## model information, coefficients, and effect sizes
## prints N, free parameters, chi-square, CFI/TLI/SRMR/RMSEA, AIC/BIC
summary(anxfit)
## Primary ANX Analysis
##
## Estimated using ML
## Number of obs: 92, number of (free) parameters: 15
##
## Model: Chi2(df = 15) = 19.479, p = 0.1928
## Baseline model: Chi2(df = 22) = 173.708, p = 0
##
## Fit Indices:
##
## CFI = 0.97, TLI = 0.957, SRMR = 0.098
## RMSEA = 0.057, 90% CI [0, 0.121], p < .05 = 0.394
## AIC = 1990.773, BIC = 2028.599
## regression, expectation, and constraint ("new") parameters with 95% CIs;
## [,-1] drops confint's duplicated Label column before binding
cbind(
coef(anxfit, param = c("reg", "exp", "new")),
confint(anxfit, param = c("reg", "exp", "new"))[,-1])
## Label est se pval LowerCI UpperCI
## 13 INT<-DSTRAT2 2.987 3.479 0.390 -3.831 9.806
## 14 INT<-DSTRAT3 6.926 2.529 0.006 1.969 11.883
## 15 INT<-DSTRAT4 5.660 2.503 0.024 0.755 10.565
## 16 INT<-DCOND 0.000 0.000 999.000 0.000 0.000
## 17 SLOPE<-DCOND 1.574 1.752 0.369 -1.860 5.009
## 18 SLOPE2<-DCOND -2.923 2.276 0.199 -7.384 1.538
## 22 ANX1<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 23 ANX2<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 24 ANX3<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 25 ANX4<-Intercepts 0.000 0.000 999.000 0.000 0.000
## 26 INT<-Intercepts 47.360 2.086 0.000 43.272 51.448
## 27 SLOPE<-Intercepts -2.581 1.303 0.048 -5.134 -0.028
## 28 SLOPE2<-Intercepts -0.186 1.701 0.913 -3.519 3.148
## 36 ADJREF 52.242 0.937 0.000 50.406 54.078
## 37 MTAU1 52.242 0.937 0.000 50.406 54.078
## 38 MTAU2 50.952 1.009 0.000 48.974 52.929
## 39 MTAU3 49.661 1.417 0.000 46.885 52.437
## 40 MTAU4 49.475 1.637 0.000 46.267 52.684
## 41 MCBT1 52.242 0.937 0.000 50.406 54.078
## 42 MCBT2 51.739 0.984 0.000 49.810 53.668
## 43 MCBT3 51.235 1.351 0.000 48.588 53.882
## 44 MCBT4 48.127 1.440 0.000 45.305 50.948
## 45 MDIF1 0.000 0.000 1.000 0.000 0.000
## 46 MDIF2 0.787 0.876 0.369 -0.930 2.504
## 47 MDIF3 1.574 1.752 0.369 -1.860 5.009
## 48 MDIF4 -1.348 1.994 0.499 -5.258 2.561
## 49 BD1 0.000 0.000 1.000 0.000 0.000
## 50 BD2 0.086 0.096 0.370 -0.102 0.274
## 51 BD3 0.172 0.192 0.370 -0.204 0.547
## 52 BD4 -0.147 0.218 0.499 -0.574 0.280
## 53 S1CBT -1.007 1.237 0.416 -3.430 1.417
## 54 S2CBT -3.108 1.492 0.037 -6.033 -0.184
## create all the data for the plot, including means, effect sizes, Ns
## confidence intervals and significance
anx.plotdat <- list()
anx.plotdat <- within(anx.plotdat, {
## full parameter table with 95% CIs; keep the model-implied adjusted means
## (MTAU1-4 then MCBT1-4) by label rather than by fragile row position
## (previously hard-coded [37:44, ], which breaks if the table changes)
est <- as.data.table(cbind(coef(anxfit), confint(anxfit)[, -1]))[
Label %in% paste0(rep(c("MTAU", "MCBT"), each = 4), 1:4), ]
## table order is MTAU1-4 followed by MCBT1-4, so labels align row-wise
est[, Condition := rep(c("TAU+", "CBT-I+Light"), each = 4)]
est[, Timepoint := c(0:3, 0:3)]
## per-wave Ns by condition, based on non-missing raw ANX totals
freq <- xtabs(~ condition + Survey, data = d.all[!is.na(ANX_Total)])
## effect-size labels from the BD* parameters; [-1] drops baseline (always 0)
## and gsub strips the leading zero (e.g. "ES = .17")
ES <- c(sprintf("ES = %s",
gsub("^0", "",
format(round(abs(subset(coef(anxfit, param = c("new")),
grepl("BD", Label))$est), 2),
digits = 2, nsmall = 2))))[-1]
## x-axis labels with per-condition Ns beneath each wave name
labels <- c(
sprintf("Baseline\nCBT-I+Light N: %d \nTAU+ N: %d ",
freq["CBT+", "Baseline"],
freq["Relaxation", "Baseline"]),
sprintf("Midpoint\n%d\n%d",
freq["CBT+", "Midpoint"],
freq["Relaxation", "Midpoint"]),
sprintf("Post\n%d\n%d",
freq["CBT+", "Post"],
freq["Relaxation", "Post"]),
sprintf("Follow-up\n%d\n%d",
freq["CBT+", "Follow-Up"],
freq["Relaxation", "Follow-Up"]))
## significance stars for the condition differences (MDIF*); [-1] drops baseline
sig <- symnum(subset(coef(anxfit), grepl("MDIF", Label))$pval,
legend = FALSE, na = "",
cutpoints = c(0, 0.001, 0.01, 0.05, 1),
symbols = c("***", "**", "*", "n.s."))[-1]
## annotation y positions just outside the plotted CI range
yval.sig <- with(est, max(UpperCI) + (abs(diff(range(c(LowerCI, UpperCI)))) * .02))
yval.es <- with(est, min(LowerCI) - (abs(diff(range(c(LowerCI, UpperCI)))) * .03))
})
## assemble the adjusted-means trajectory plot for Anxiety Symptoms
p.anx <- with(anx.plotdat, {
base <- ggplot(est, aes(Timepoint + ifelse(Condition == "CBT-I+Light", .03, -.03),
est, ymin = LowerCI, ymax = UpperCI,
colour = Condition, shape = Condition)) +
geom_hline(yintercept = 50, linetype = 3, colour = "grey50") +
geom_line(size = .1, alpha = .5) +
geom_pointrange(size = 1) +
scale_x_continuous("", breaks = 0:3, labels = labels) +
scale_y_continuous("Anxiety Symptoms (Adjusted)",
breaks = seq(40, 56, 2))
## significance stars above and effect sizes below the CIs at the
## three post-baseline timepoints (x = 1, 2, 3)
base +
lapply(1:3, function(i) annotate("text", x = i, y = yval.sig, label = sig[i])) +
lapply(1:3, function(i) annotate("text", x = i, y = yval.es, label = ES[i]))
})
p.anx <- set_palette(p.anx, "jco")
print(p.anx)
Final Panel Plot
## helper: widen margins and disable clipping so annotations outside the
## panel are not cut off
panel_prep <- function(p) {
p + theme(plot.margin = margin(2, 14, 0, 32, "pt")) + coord_cartesian(clip = "off")
}
## final panel: six outcome plots in a 2x3 grid with one shared legend
p.all <- ggarrange(
panel_prep(p.isi), panel_prep(p.sri), panel_prep(p.sd),
panel_prep(p.fa), panel_prep(p.dep), panel_prep(p.anx),
labels = c("A", "B", "C", "D", "E", "F"),
ncol = 2, nrow = 3,
legend = "bottom",
common.legend = TRUE)
print(p.all)
## write the colour figure to PDF
pdf(file = "figure2.pdf", width = 9, height = 12)
print(p.all)
dev.off()
## png
## 2
## helper: same margin/clipping treatment plus a greyscale colour palette
panel_prep_grey <- function(p) {
p + theme(plot.margin = margin(2, 14, 0, 32, "pt")) +
coord_cartesian(clip = "off") + color_palette("grey")
}
## greyscale version of the final panel for print reproduction
p.allgrey <- ggarrange(
panel_prep_grey(p.isi), panel_prep_grey(p.sri), panel_prep_grey(p.sd),
panel_prep_grey(p.fa), panel_prep_grey(p.dep), panel_prep_grey(p.anx),
labels = c("A", "B", "C", "D", "E", "F"),
ncol = 2, nrow = 3,
legend = "bottom",
common.legend = TRUE)
## Scale for 'colour' is already present. Adding another scale for 'colour',
## which will replace the existing scale.
## Scale for 'colour' is already present. Adding another scale for 'colour',
## which will replace the existing scale.
## Scale for 'colour' is already present. Adding another scale for 'colour',
## which will replace the existing scale.
## Scale for 'colour' is already present. Adding another scale for 'colour',
## which will replace the existing scale.
## Scale for 'colour' is already present. Adding another scale for 'colour',
## which will replace the existing scale.
## Scale for 'colour' is already present. Adding another scale for 'colour',
## which will replace the existing scale.
tiff(file = "figure2_greyscale.tif", width = 9, height = 12, units = "in", res = 300, compression = "lzw+p")
print(p.allgrey)
dev.off()
## png
## 2
Supplementary Analyses
## dropout differences by characteristics at screening (just stratification factors)
## chi-square tests comparing completers vs dropouts on the stratification
## variables and randomised condition
egltable(c(
"baseline_stage_high", "baseline_isi_high", "condition"),
g = "Dropout",
data = d.all[Survey=="Screening"])
## Completer N (%) Dropout N (%)
## 1: baseline_stage_high
## 2: Stage >= 3 33 (45.2) 16 (57.1)
## 3: Stage <= 2 40 (54.8) 12 (42.9)
## 4: baseline_isi_high
## 5: ISI >= 8 51 (69.9) 23 (82.1)
## 6: ISI <= 7 22 (30.1) 5 (17.9)
## 7: condition
## 8: Relaxation 34 (46.6) 16 (57.1)
## 9: CBT+ 39 (53.4) 12 (42.9)
## Test
## 1: Chi-square = 1.15, df = 1, p = .283, Phi = 0.11
## 2:
## 3:
## 4: Chi-square = 1.56, df = 1, p = .212, Phi = 0.12
## 5:
## 6:
## 7: Chi-square = 0.90, df = 1, p = .342, Phi = 0.09
## 8:
## 9:
## dropout differences by baseline mental health
## t-tests comparing completers vs dropouts on baseline depression and anxiety
egltable(c(
"DEP_TScore", "ANX_TScore"),
g = "Dropout",
data = d.all[Survey=="Baseline"])
## Completer M (SD) Dropout M (SD)
## 1: DEP_TScore 50.51 (9.25) 52.68 (9.51)
## 2: ANX_TScore 51.84 (9.65) 54.29 (9.28)
## Test
## 1: t(df=89) = -0.91, p = .366, d = 0.23
## 2: t(df=89) = -0.99, p = .324, d = 0.26
## sleep tx at post
## frequencies of other treatment use reported at Post, by condition;
## final [, c(1, 3, 2, 4)] reorders columns so CBT+ appears before Relaxation
egltable(c(
"GRP", "PSY", "MED", "PSYSL", "MEDSL", "HERSL"),
g = "condition",
data = droplevels(d.all[Survey == "Post"]),
parametric = TRUE)[, c(1, 3, 2, 4)]
## Warning in chisq.test(tabs, correct = FALSE, simulate.p.value = simChisq, : Chi-
## squared approximation may be incorrect
## Warning in chisq.test(tabs, correct = FALSE, simulate.p.value = simChisq, : Chi-
## squared approximation may be incorrect
## CBT+ N (%) Relaxation N (%)
## 1: GRP
## 2: No 27 (75.0) 28 (87.5)
## 3: Yes 9 (25.0) 4 (12.5)
## 4: PSY
## 5: No 25 (69.4) 26 (81.2)
## 6: Yes 11 (30.6) 6 (18.8)
## 7: MED
## 8: No 31 (86.1) 29 (90.6)
## 9: Yes 5 (13.9) 3 (9.4)
## 10: PSYSL
## 11: No 33 (91.7) 31 (96.9)
## 12: Yes 3 (8.3) 1 (3.1)
## 13: MEDSL
## 14: No 27 (75.0) 24 (75.0)
## 15: Yes 9 (25.0) 8 (25.0)
## 16: HERSL
## 17: No 30 (83.3) 27 (84.4)
## 18: Yes 6 (16.7) 5 (15.6)
## Test
## 1: Chi-square = 1.71, df = 1, p = .191, Phi = 0.16
## 2:
## 3:
## 4: Chi-square = 1.26, df = 1, p = .262, Phi = 0.14
## 5:
## 6:
## 7: Chi-square = 0.33, df = 1, p = .564, Phi = 0.07
## 8:
## 9:
## 10: Chi-square = 0.83, df = 1, p = .362, Phi = 0.11
## 11:
## 12:
## 13: Chi-square = 0.00, df = 1, p = 1.000, Phi = 0.00
## 14:
## 15:
## 16: Chi-square = 0.01, df = 1, p = .907, Phi = 0.01
## 17:
## 18:
## PROs at post test
## condition differences in patient-reported outcomes at post-treatment
egltable(c(
"ISI_Total", "SRI_TScore", "SD_TScore", "FA_TScore", "DEP_TScore", "ANX_TScore"),
g = "condition",
data = droplevels(d.all[Survey == "Post"]),
parametric = TRUE)
## Relaxation M (SD) CBT+ M (SD)
## 1: ISI_Total 10.42 (5.51) 8.09 (5.78)
## 2: SRI_TScore 54.43 (8.55) 50.78 (9.35)
## 3: SD_TScore 51.33 (7.11) 49.74 (10.53)
## 4: FA_TScore 56.72 (7.61) 53.13 (9.61)
## 5: DEP_TScore 47.48 (9.95) 49.87 (9.28)
## 6: ANX_TScore 48.05 (11.14) 51.93 (10.46)
## Test
## 1: t(df=67) = 1.72, p = .091, d = 0.41
## 2: t(df=67) = 1.69, p = .096, d = 0.41
## 3: t(df=68) = 0.73, p = .465, d = 0.18
## 4: t(df=67) = 1.71, p = .092, d = 0.41
## 5: t(df=67) = -1.04, p = .304, d = 0.25
## 6: t(df=67) = -1.50, p = .139, d = 0.36
## does ISI at post differ by treatment during intervention?
## Welch two-sample t-test of post-treatment ISI by PSY status (yes/no)
t.test(ISI_Total ~ PSY, data = droplevels(d.all[Survey == "Post"]))
##
## Welch Two Sample t-test
##
## data: ISI_Total by PSY
## t = -1.1699, df = 26.588, p-value = 0.2524
## alternative hypothesis: true difference in means between group No and group Yes is not equal to 0
## 95 percent confidence interval:
## -4.980548 1.365018
## sample estimates:
## mean in group No mean in group Yes
## 8.60400 10.41176
## Welch two-sample t-test of post-treatment ISI by GRP status (yes/no)
t.test(ISI_Total ~ GRP, data = droplevels(d.all[Survey == "Post"]))
##
## Welch Two Sample t-test
##
## data: ISI_Total by GRP
## t = -0.84766, df = 16.607, p-value = 0.4087
## alternative hypothesis: true difference in means between group No and group Yes is not equal to 0
## 95 percent confidence interval:
## -5.396491 2.307033
## sample estimates:
## mean in group No mean in group Yes
## 8.762963 10.307692
## Welch two-sample t-test of post-treatment ISI by MED status (yes/no)
t.test(ISI_Total ~ MED, data = droplevels(d.all[Survey == "Post"]))
##
## Welch Two Sample t-test
##
## data: ISI_Total by MED
## t = -0.36666, df = 7.7653, p-value = 0.7237
## alternative hypothesis: true difference in means between group No and group Yes is not equal to 0
## 95 percent confidence interval:
## -7.794124 5.665310
## sample estimates:
## mean in group No mean in group Yes
## 8.935593 10.000000
## Welch two-sample t-test of post-treatment ISI by PSYSL status (yes/no)
t.test(ISI_Total ~ PSYSL, data = droplevels(d.all[Survey == "Post"]))
##
## Welch Two Sample t-test
##
## data: ISI_Total by PSYSL
## t = -0.53852, df = 3.1431, p-value = 0.626
## alternative hypothesis: true difference in means between group No and group Yes is not equal to 0
## 95 percent confidence interval:
## -15.72576 11.07338
## sample estimates:
## mean in group No mean in group Yes
## 8.92381 11.25000
## Welch two-sample t-test of post-treatment ISI by MEDSL status (yes/no)
t.test(ISI_Total ~ MEDSL, data = droplevels(d.all[Survey == "Post"]))
##
## Welch Two Sample t-test
##
## data: ISI_Total by MEDSL
## t = -1.7039, df = 22.408, p-value = 0.1022
## alternative hypothesis: true difference in means between group No and group Yes is not equal to 0
## 95 percent confidence interval:
## -6.4510075 0.6284192
## sample estimates:
## mean in group No mean in group Yes
## 8.32400 11.23529
## Welch two-sample t-test of post-treatment ISI by HERSL status (yes/no);
## this is the comparison followed up in the change-score analyses below
t.test(ISI_Total ~ HERSL, data = droplevels(d.all[Survey == "Post"]))
##
## Welch Two Sample t-test
##
## data: ISI_Total by HERSL
## t = -4.0108, df = 13.909, p-value = 0.001304
## alternative hypothesis: true difference in means between group No and group Yes is not equal to 0
## 95 percent confidence interval:
## -10.069689 -3.049792
## sample estimates:
## mean in group No mean in group Yes
## 7.985714 14.545455
## evaluate whether HERSL is associated with change in ISI
tmpdat <- na.omit(merge(
d.all[Survey=="Baseline", .(ID, ISIB = ISI_Total)],
d.all[Survey=="Post", .(ID, ISIP = ISI_Total, HERSL)],
by = "ID", all = FALSE))
## during TX HERSL associated with baseline ISI?
t.test(ISIB ~ HERSL, data = tmpdat)
##
## Welch Two Sample t-test
##
## data: ISIB by HERSL
## t = -2.5163, df = 12.194, p-value = 0.02682
## alternative hypothesis: true difference in means between group No and group Yes is not equal to 0
## 95 percent confidence interval:
## -9.8604921 -0.7174299
## sample estimates:
## mean in group No mean in group Yes
## 11.89286 17.18182
## during TX HERSL associated with change in ISI?
## change score (post minus baseline); I() keeps the subtraction arithmetic
t.test(I(ISIP - ISIB) ~ HERSL, data = tmpdat)
##
## Welch Two Sample t-test
##
## data: I(ISIP - ISIB) by HERSL
## t = -0.60638, df = 12.113, p-value = 0.5555
## alternative hypothesis: true difference in means between group No and group Yes is not equal to 0
## 95 percent confidence interval:
## -5.832171 3.290613
## sample estimates:
## mean in group No mean in group Yes
## -3.907143 -2.636364
Session Info for Documentation
## record R, platform, and package versions for reproducibility
sessionInfo()
## R version 4.1.0 (2021-05-18)
## Platform: x86_64-w64-mingw32/x64 (64-bit)
## Running under: Windows 10 x64 (build 19043)
##
## Matrix products: default
##
## locale:
## [1] LC_COLLATE=English_United States.1252
## [2] LC_CTYPE=English_United States.1252
## [3] LC_MONETARY=English_United States.1252
## [4] LC_NUMERIC=C
## [5] LC_TIME=English_United States.1252
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] MplusAutomation_0.8 data.table_1.14.0 reshape2_1.4.4
## [4] JWileymisc_1.2.1 scales_1.1.1 ggpubr_0.4.0
## [7] ggplot2_3.3.3 rmarkdown_2.8 knitr_1.33
##
## loaded via a namespace (and not attached):
## [1] TH.data_1.0-10 VGAM_1.1-5 minqa_1.2.4
## [4] colorspace_2.0-1 ggsignif_0.6.1 ellipsis_0.3.2
## [7] rio_0.5.26 estimability_1.3 htmlTable_2.2.1
## [10] base64enc_0.1-3 rstudioapi_0.13 mice_3.13.0
## [13] farver_2.1.0 lavaan_0.6-8 MatrixModels_0.5-0
## [16] fansi_0.5.0 mvtnorm_1.1-1 codetools_0.2-18
## [19] splines_4.1.0 mnormt_2.0.2 robustbase_0.93-8
## [22] texreg_1.37.5 Formula_1.2-4 jsonlite_1.7.2
## [25] nloptr_1.2.2.2 broom_0.7.6 cluster_2.1.2
## [28] png_0.1-7 httr_1.4.2 compiler_4.1.0
## [31] emmeans_1.6.1 backports_1.2.1 assertthat_0.2.1
## [34] Matrix_1.3-4 htmltools_0.5.1.1 quantreg_5.85
## [37] tools_4.1.0 coda_0.19-4 gtable_0.3.0
## [40] glue_1.4.2 dplyr_1.0.6 ggthemes_4.2.4
## [43] Rcpp_1.0.6 carData_3.0-4 cellranger_1.1.0
## [46] jquerylib_0.1.4 vctrs_0.3.8 nlme_3.1-152
## [49] conquer_1.0.2 psych_2.1.3 xfun_0.23
## [52] stringr_1.4.0 proto_1.0.0 openxlsx_4.2.3
## [55] lme4_1.1-27 lifecycle_1.0.0 extraoperators_0.1.1
## [58] rstatix_0.7.0 polspline_1.1.19 DEoptimR_1.0-9
## [61] MASS_7.3-54 zoo_1.8-9 hms_1.1.0
## [64] parallel_4.1.0 sandwich_3.0-1 SparseM_1.81
## [67] RColorBrewer_1.1-2 yaml_2.2.1 curl_4.3.1
## [70] gridExtra_2.3 pander_0.6.3 sass_0.4.0
## [73] rms_6.2-0 rpart_4.1-15 latticeExtra_0.6-29
## [76] stringi_1.6.2 highr_0.9 checkmate_2.0.0
## [79] boot_1.3-28 zip_2.2.0 rlang_0.4.11
## [82] pkgconfig_2.0.3 matrixStats_0.59.0 evaluate_0.14
## [85] lattice_0.20-44 purrr_0.3.4 labeling_0.4.2
## [88] htmlwidgets_1.5.3 cowplot_1.1.1 tidyselect_1.1.1
## [91] ggsci_2.9 plyr_1.8.6 magrittr_2.0.1
## [94] R6_2.5.0 generics_0.1.0 multcompView_0.1-8
## [97] Hmisc_4.5-0 multcomp_1.4-17 DBI_1.1.1
## [100] gsubfn_0.7 pillar_1.6.1 haven_2.4.1
## [103] foreign_0.8-81 withr_2.4.2 mgcv_1.8-36
## [106] survival_3.2-11 abind_1.4-5 nnet_7.3-16
## [109] tibble_3.1.2 crayon_1.4.1 car_3.0-10
## [112] utf8_1.2.1 tmvnsim_1.0-2 jpeg_0.1-8.1
## [115] grid_4.1.0 readxl_1.3.1 pbivnorm_0.6.0
## [118] forcats_0.5.1 digest_0.6.27 xtable_1.8-4
## [121] GPArotation_2014.11-1 tidyr_1.1.3 stats4_4.1.0
## [124] munsell_0.5.0 bslib_0.2.5.1