
Merge pull request #1167 from wadpac/ghpages_reviseNotWornDocs_addCh13
Update documentation on NotWorn algorithm and add circadian rhythm chapter to gh-pages
vincentvanhees authored Jul 10, 2024
2 parents 9ae4851 + 80e1241 commit c3ce7b8
Showing 14 changed files with 432 additions and 123 deletions.
1 change: 1 addition & 0 deletions .Rbuildignore
@@ -13,6 +13,7 @@ RELEASE_CYCLE.md
^pkgdown$
^vignettes/chapter*
^dev-functions$
^user-scripts$
.zenodo.json
^doc$
.chapterInProgress.Rmd
2 changes: 1 addition & 1 deletion _pkgdown.yml
@@ -66,7 +66,7 @@ navbar:
- text: 12. Time-Use Analysis
href: articles/chapter12_TimeUseAnalysis.html
- text: 13. Circadian Rhythm Analysis
href: articles/chapterInProgress.html
href: articles/chapter13_CircadianRhythm.html
annexes:
text: Annexes
menu:
12 changes: 5 additions & 7 deletions man/GGIR.Rd
@@ -1167,13 +1167,11 @@ GGIR(mode = 1:5,
\item{HASPT.ignore.invalid}{
Boolean (default = FALSE).
To indicate whether invalid time segments should be ignored in the
Sleep Period Time detection. If \code{FALSE} (default), the imputed
angle or activity metric during the invalid time segments is used
in the Sleep Period Time detection. If \code{TRUE}, invalid time
segments are ignored for the Sleep Period Time detection
(i.e., considered to be out of the Sleep Period Time).
If \code{NA}, then invalid time segments are considered to be no movement
segments.
heuristic guiders. If \code{FALSE} (default), the imputed
angle or activity metric during the invalid time segments is used.
If \code{TRUE}, invalid time segments are ignored (i.e., they cannot
contribute to the guider). If \code{NA}, then invalid time segments are
considered to be no movement segments and can contribute to the guider.
}
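For illustration, a minimal sketch of a GGIR call that sets this parameter (the folder paths are placeholders and all other arguments keep their defaults):

library(GGIR)
GGIR(datadir = "D:/mystudy/accdata",   # placeholder: folder with accelerometer files
     outputdir = "D:/mystudy/output",  # placeholder: folder for GGIR output
     mode = 1:4,
     HASPT.ignore.invalid = TRUE)      # invalid segments cannot contribute to the guider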
\item{HASIB.algo}{
7 changes: 7 additions & 0 deletions user-scripts/README.md
@@ -0,0 +1,7 @@
In this folder you will find R scripts that have proven useful for some GGIR users, but are not deemed ready for inclusion in GGIR itself.

The reasons these are not included as GGIR functionality vary:

- Functionality not relevant for the broader community
- Code not mature enough to be used inside GGIR
- More effort needed to comply with license requirements of copied code.
118 changes: 118 additions & 0 deletions user-scripts/agd2csv.R
@@ -0,0 +1,118 @@
# Developed by: Vincent van Hees
# Objective: convert ActiGraph .agd files from various generations to csv format that GGIR can then read

library(RSQLite)
library(utils)
library(foreach)

desiredtz = "Europe/Amsterdam"

# .agd location
datadir = "D:/agd"

# where to store the resulting csv files?
actigraphdir = "D:/projectoutput"

#===============================================================================
fns = dir(datadir, full.names = TRUE)

cores = parallel::detectCores()
Ncores = cores[1]
if (Ncores > 3) {
Ncores2use = min(c(Ncores - 1, 10, length(fns)))
cl <- parallel::makeCluster(Ncores2use) # not to overload your computer
doParallel::registerDoParallel(cl)
} else {
stop("Code assumes that there are more than 3 CPU cores available")
}

packages2passon = 'RSQLite'
errhand = 'pass'
`%myinfix%` = foreach::`%dopar%`
output_list = foreach::foreach(i = 1:length(fns), .packages = packages2passon,
.export = NULL, .errorhandling = errhand) %myinfix% {
tryCatchResult = tryCatch({
# for (i in 1:length(fns)) {
filename = fns[i]
t0 = Sys.time()
# Following 10 lines derived from
# https://github.com/github-js/acc/blob/master/R/readCounts.R
# but modified as their code does not recognise that it is a uniaxial GT3Xplus
con = DBI::dbConnect(RSQLite::SQLite(), dbname = filename)
settings <- DBI::dbReadTable(con, "settings")
tables = DBI::dbListTables(con) # tables present in the .agd database (not used further)
settings$settingName = tolower(settings$settingName)
deviceName <- settings$settingValue[settings$settingName == "devicename"]
deviceVersion <- settings$settingValue[settings$settingName == "deviceversion"]
serialNumber <- settings$settingValue[settings$settingName == "deviceserial"]
epochlength <- as.numeric(as.character(settings$settingValue[settings$settingName == "epochlength"]))
startdatetime <- settings$settingValue[settings$settingName == "startdatetime"]
softwareVersion <- settings$settingValue[settings$settingName == "softwareversion"]
filter <- settings$settingValue[settings$settingName == "filter"]
datetimeformat <- settings$settingValue[settings$settingName == "datetimeformat"]
if (length(softwareVersion) == 0) softwareVersion = ""

startdatetime2 <- as.POSIXlt((as.numeric(startdatetime)/1e+07),
origin = "0001-01-01 00:00:00", tz = "GMT")
startDate <- substring(startdatetime2, 1, 10)
counts <- DBI::dbReadTable(con, "data")
DBI::dbDisconnect(con)
counts <- counts[complete.cases(counts), ]
timeline = (0:as.integer((dim(counts)[1]) - 1) * epochlength)
rawTimeStamp = rep(startdatetime2, dim(counts)[1])
rst = gsub(" GMT", "", as.POSIXlt(rawTimeStamp, tz = "GMT") + timeline)
if (all(c("axis1", "axis2", "axis3") %in% colnames(counts))) {
type <- "triaxial"
counts = counts[, c("axis1", "axis2", "axis3")]
} else {
type <- "uniaxial"
counts = counts[, "axis1"]
}
data = data.frame(TimeStamp = as.vector(rst), counts = counts)
if (nrow(data) > 0) {
# Extract windowsize
tmp = as.POSIXlt(data$TimeStamp[1:2], format = "%Y-%m-%d %H:%M:%S", tz = desiredtz)
epochSize = as.numeric(difftime(tmp[2], tmp[1], units = "secs"))
if (length(epochSize) == 0) epochSize = epochlength
# Create file header for the Actigraph file
start = tmp[1]
starttime = strftime(start, format = "%H:%M:%S")
startdate = paste0(start$mon + 1, "/", start$mday, "/", start$year + 1900) #month day year
header = c(paste0("------------ Data File Converted from agd to csv for ActiGraph ",
deviceName, " ActiLife ", softwareVersion, " Firmware v", deviceVersion,
" date format ", datetimeformat, " Filter ", filter , " -----------"),
paste0("Serial Number: ", serialNumber),
paste0("Start Time ", starttime),
paste0("Start Date ", startdate),
paste0("Epoch Period (hh:mm:ss) 00:00:",
ifelse(test = nchar(as.character(epochSize)) == 1, yes = "0", no = ""), epochSize),
"Download Time ***",
"Download Date ***",
"Current Memory Address: 0",
"Current Battery Voltage: *.** Mode = **",
"--------------------------------------------------")

# Store to csv file
fname = unlist(strsplit(basename(filename), "[.]agd"))
fname_full = paste0(actigraphdir, "/", fname, "_", type, ".csv")
way_to_save = "original" # alternative: "ruben"; to aid investigating whether the way of saving the file matters
if (way_to_save == "original") {
cat(paste(header, "\n", collapse = ""), file = fname_full)
write.table(data, file = fname_full, row.names = FALSE,
col.names = FALSE, sep = ",", fileEncoding = "UTF-8", append = TRUE)
} else if (way_to_save == "ruben") {
sink(fname_full)
for (hi in 1:length(header)) {
cat(paste0(header[hi],"\n"))
}
sink()
write.table(data, fname_full, append = TRUE, col.names = FALSE, row.names = FALSE, sep = ',')
}
}
t1 = Sys.time()
cat(paste0(round(as.numeric(difftime(time1 = t1, time2 = t0, units = "secs")), digits = 2), "secs "))
# }
})
return(tryCatchResult)
}
parallel::stopCluster(cl) # shut down the parallel cluster
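As a follow-up sketch (not part of the script above), the converted csv files could be pointed to from a GGIR call. The dataFormat value below is an assumption, and additional epoch-data arguments (e.g. the timestamp format) may be required; check the GGIR manual for the version in use:

library(GGIR)
GGIR(datadir = "D:/projectoutput",         # folder with the converted ActiGraph csv files
     outputdir = "D:/projectoutput_GGIR",  # placeholder output folder
     dataFormat = "actigraph_csv",         # assumed value for ActiGraph count csv input
     mode = 1:2)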
189 changes: 189 additions & 0 deletions user-scripts/script_individualised_time_per_level.R
@@ -0,0 +1,189 @@
# Developed by: Vincent van Hees
# Objective: Facilitate research on individualized cut-points

library(GGIR)

# specify locations of files
setwd("D:/myprojectfolder")

individual_cutpoints_file = "individual_cutpoints.csv" # the file with the individual cutpoints
datadir = "testfiles" # folder with accelerometer data
outputdir = "."
GGIRresultsfolder = "output_testfiles/results" # path to GGIR results

#=======================================
# 1) Extract individual cut-points
#=======================================

ISWTcp = read.csv(individual_cutpoints_file) # ISWT cut-points
ilevels = sort(unique(c(0, as.vector(as.matrix(ISWTcp[, which(tolower(colnames(ISWTcp)) != "id")])), 8000)))
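# For reference, a hypothetical example of the layout this script assumes for
# individual_cutpoints.csv: an ID column plus one column per ascending cut-point
# (in the same units as the GGIR acceleration metric, e.g. mg for ENMO):
#
#   ID,     light, moderate, vigorous
#   sub001, 40,    100,      400
#   sub002, 35,    90,       380
#
# Empty cells are allowed; levels beyond an empty cell are skipped further below.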

#=======================================
# 2) Run GGIR part 1 and 2
#=======================================

# Replace with your own GGIR call, but make sure ilevels = ilevels is part of it
GGIR(datadir = datadir,
outputdir = outputdir,
mode = NULL,
do.enmo = TRUE,
do.mad = FALSE,
idloc = 2,
overwrite = TRUE,
qwindow = c(0, 24),
ilevels = ilevels,
do.report = c(),
visualreport = FALSE)

#=======================================
# 3) Combine columns based on individual cut-points
#=======================================

# declare functions
getMetrics = function(x) {
tmp = unlist(strsplit(x, "_"))
return(tmp[2])
}
getDaysegments = function(x) {
tmp = unlist(strsplit(x, "_"))
return(tmp[4])
}
getRange = function(x) {
tmp = unlist(strsplit(x, "[.]"))
if (tmp[3] == "8e") tmp[3] = "8000"
tmp = as.numeric(tmp[2:3])
return(tmp)
}
filterValues = function(x, metric, daySegment) {
return(x[intersect(grep(pattern = metric, x = x), grep(pattern = daySegment, x = x))])
}
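# For reference, a hypothetical example of the part2_daysummary column names these
# helpers are written for: a GGIR ilevels column such as "[0,40)_ENMO_mg_0-24hr"
# is turned into "X.0.40._ENMO_mg_0.24hr" by read.csv, so that:
#   getMetrics("X.0.40._ENMO_mg_0.24hr")      # "ENMO"   (metric)
#   getDaysegments("X.0.40._ENMO_mg_0.24hr")  # "0.24hr" (day segment)
#   getRange("X.0.40._ENMO_mg_0.24hr")        # c(0, 40) (bin boundaries)
# The "8e" special case in getRange handles the upper bin 8000 printed as 8e+03.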

#---------------------------------------
# Day report
#---------------------------------------
part2_dayreport_file = paste0(GGIRresultsfolder, "/part2_daysummary.csv")
part2_dayreport = read.csv(file = part2_dayreport_file)
colindex = grep(pattern = "X[.]", x = colnames(part2_dayreport))
values1 = colnames(part2_dayreport)[colindex]

# Account for possible multiple metrics and/or multiple day segments
metrics = unique(mapply(values1, FUN = getMetrics))
daySegments = unique(mapply(values1, FUN = getDaysegments))
ilevel_names = NULL
for (metric in metrics) {
for (daySegment in daySegments) {
# Extract bin range per ilevel
values = filterValues(values1, metric, daySegment)
ilevel_range = mapply(values, FUN = getRange)
# Loop over rows (days)
D = matrix(NA, nrow(part2_dayreport), ncol(ISWTcp))
for (j in 1:nrow(part2_dayreport)) {
ID = part2_dayreport$ID[j]
h = which(ISWTcp$ID == ID)

TotalTime = sum(part2_dayreport[j, colindex], na.rm = TRUE)

if (length(h) == 0) next # skip because ID not found in individual cut-point file
# The break statements below exit the loop when the individual cut-point file has an empty cell
# Loop over rows in individual cutpoints overview
for (i in 1:(ncol(ISWTcp) - 1)) {
# Identify which columns in part2_daysummary need to be summed to get individualised level
if (i == 1) {
if (is.na(ISWTcp[h, i + 1])) {
break
}
columns_of_interest = which(ilevel_range[1,] >= 0 & ilevel_range[2,] <= ISWTcp[h, i + 1])
} else {
if (is.na(ISWTcp[h, i])) {
break
}
if (i + 1 <= ncol(ISWTcp)) {
if (is.na(ISWTcp[h, i + 1])) {
break
}
columns_of_interest = which(ilevel_range[1,] >= ISWTcp[h, i] & ilevel_range[2,] <= ISWTcp[h, i + 1])
} else {
columns_of_interest = which(ilevel_range[1,] >= ISWTcp[h, i])
}
}
time_in_levels = part2_dayreport[j, columns_of_interest + (min(colindex) - 1)]
if (!all(is.na(time_in_levels))) {
D[j, i] = sum(time_in_levels, na.rm = TRUE)
}
}
if (TotalTime != 0) {
D[j, ncol(ISWTcp)] = round(TotalTime - sum(D[j, ], na.rm = TRUE), digits = 3)
}
}
ilevel_names = c(ilevel_names, sub(pattern = "X[.]", replacement = "", x = colnames(ilevel_range)))
D = as.data.frame(D)
colnames(D)[1:(ncol(ISWTcp) - 1)] = paste0(metric, "_Level_", 0:(ncol(ISWTcp) - 2), "_", 1:(ncol(ISWTcp) - 1), "_", daySegment)
colnames(D)[ncol(ISWTcp)] = paste0(metric, "_above_highest_level_", daySegment)
part2_dayreport = cbind(part2_dayreport, D)
}
}
part2_dayreport_file_modified = paste0(unlist(strsplit(part2_dayreport_file, "[.]csv")), "_modified.csv")
data.table::fwrite(x = part2_dayreport, file = part2_dayreport_file_modified,
row.names = F, na = "")
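# Worked example with hypothetical numbers: suppose a participant has cut-points
# 40 and 100 and, for a given day, the ilevels bins hold time values
# [0,40) = 500, [40,100) = 60 and [100,8000) = 15. The loop above then yields
# Level_0_1 = 500, Level_1_2 = 60 and above_highest_level = 15
# (total time minus the summed levels).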

#---------------------------------------
# Person report
#---------------------------------------
part2_personreport_file = paste0(GGIRresultsfolder, "/part2_summary.csv")
part2_personreport = read.csv(file = part2_personreport_file)

for (weeksegment in c("AD", "WE", "WD", "WWE", "WWD")) {
P2 = part2_personreport[grep(pattern = paste0(weeksegment, "|ID"), x = colnames(part2_personreport))]
colindex = grep(pattern = paste0(ilevel_names, collapse = "|"), x = colnames(P2))
values1 = colnames(P2)[colindex]
for (metric in metrics) {
for (daySegment in daySegments) {
values = filterValues(values1, metric, daySegment)
ilevel_range = mapply(values, FUN = getRange)
D = matrix(NA, nrow(P2), ncol(ISWTcp))
for (j in 1:nrow(P2)) {
ID = P2$ID[j]
h = which(ISWTcp$ID == ID)
TotalTime = sum(P2[j, colindex], na.rm = TRUE)
if (length(h) == 0) next # skip because ID not found in individual cut-point file
# The break statements below exit the loop when the individual cut-point file has an empty cell
for (i in 1:(ncol(ISWTcp) - 1)) {
if (i == 1) {
if (is.na(ISWTcp[h, i + 1])) {
break
}
columns_of_interest = which(ilevel_range[1,] >= 0 & ilevel_range[2,] <= ISWTcp[h, i + 1])
} else {
if (is.na(ISWTcp[h, i])) {
break
}
if (i + 1 <= ncol(ISWTcp)) {
if (is.na(ISWTcp[h, i + 1])) {
break
}
columns_of_interest = which(ilevel_range[1,] >= ISWTcp[h, i] & ilevel_range[2,] <= ISWTcp[h, i + 1])
} else {
columns_of_interest = which(ilevel_range[1,] >= ISWTcp[h, i])
}
}
time_in_levels = P2[j, columns_of_interest + (min(colindex) - 1)]
if (!all(is.na(time_in_levels))) {
D[j, i] = sum(time_in_levels, na.rm = TRUE)
}
}
if (TotalTime != 0) {
D[j, ncol(ISWTcp)] = round(TotalTime - sum(D[j, ], na.rm = TRUE), digits = 3)
}
}
D = as.data.frame(D)
colnames(D)[1:(ncol(ISWTcp) - 1)] = paste0(weeksegment, "_", metric, "_Level_", 0:(ncol(ISWTcp) - 2), "_", 1:(ncol(ISWTcp) - 1), "_", daySegment)
colnames(D)[ncol(ISWTcp)] = paste0(weeksegment, "_", metric, "_above_highest_level_", daySegment)
part2_personreport = cbind(part2_personreport, D)
}
}
}

part2_personreport_file_modified = paste0(unlist(strsplit(part2_personreport_file, "[.]csv")), "_modified.csv")
data.table::fwrite(x = part2_personreport, file = part2_personreport_file_modified,
row.names = F, na = "")
