This repository has been archived by the owner on Apr 13, 2018. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathaccuracy.R
178 lines (128 loc) · 6.94 KB
/
accuracy.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
# 6_RUNS ACCURACY ANALYSIS
# Violet Kozloff
# November 3, 2017
# This script analyzes structured and random blocks across four tasks: auditory (speech and tones) and visual (letters and images).
# It computes each participant's mean accuracy (proportion of correct key responses) for each condition.
# It also runs an ANOVA to compare accuracy between tasks, modalities, and domains.
# NOTE: f002_auditory_6 has been modified to reflect the correct participant id
# NOTE: Does not remove points outside 2.5 stdev of mean
# NOTE: relevant columns pre-selected through this experiment's version of fmri_data_cleaning.Rmd
# ****************************************************************************
# ******************** I. PREPARE FILES *************************
# Prepare workspace ------------------------------------------------------------------------------------------------------
# Move to folder from GitHub (Mac when folder is on desktop).
# NOTE(review): setwd() + rm(list = ls()) in a script are generally discouraged
# (they reset the caller's session); kept because the relative paths below
# depend on this working directory — confirm before removing.
setwd("/Users/qigroup/Desktop/fmri-pilot-beh-analysis-master/accuracy/6_runs/scripts/")
# Remove objects in environment
rm(list=ls())
# Prepare paths for files --------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# For use on Mac
auditory_path <- "../cleaned_data/auditory/"
visual_path <- "../cleaned_data/visual/"
# List auditory and visual files---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# "\\.csv$" anchors the match to the .csv extension; the original "*.csv" was
# a shell glob, not a valid regular expression, and could match other files.
afiles <- list.files(path = auditory_path, pattern = "\\.csv$")
vfiles <- list.files(path = visual_path, pattern = "\\.csv$")
# Underscore-free file names (kept for compatibility with downstream code)
auditory_files <- gsub("_", "", afiles)
visual_files <- gsub("_", "", vfiles)
# Read in auditory files and combine them into one data frame--------------------------------------------------------------------------------------------------------------------------------------------
# Each CSV is read and the results are row-bound in one pass. This replaces
# the original assign()/eval(parse(text = ...)) pattern, which polluted the
# global environment with one object per file and grew a list inside a loop.
auditory_data_frame <- do.call(
  rbind.data.frame,
  lapply(afiles, function(auditory_file) {
    read.csv(paste0(auditory_path, auditory_file))
  })
)
# Prepare auditory files for use----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Rename expName column as "domain"
names(auditory_data_frame)[names(auditory_data_frame) == 'expName'] <- 'domain'
# Read in visual files and combine them into one data frame------------------------------------------------------------------------------------------------------
# Each CSV is read and the results are row-bound in one pass. This replaces
# the original assign()/eval(parse(text = ...)) pattern (one global object
# per file, list grown inside a loop) and drops the redundant double
# initialization of visual_data_frame (data.frame() immediately overwritten
# by list()).
visual_data_frame <- do.call(
  rbind.data.frame,
  lapply(vfiles, function(visual_file) {
    read.csv(paste0(visual_path, visual_file))
  })
)
# Rename expName column as 'domain'
names(visual_data_frame)[names(visual_data_frame) == 'expName'] <- 'domain'
# ******************** II. FIND AUDITORY ACCURACY *************************
# Auditory: build a data frame with one row per participant x task, holding
# that cell's mean accuracy (mean of key_resp.corr, rounded to 3 digits)-----------------------------------------------------------------------------------------------------
# List unique participant IDs for this condition
list_part_id <- unique(auditory_data_frame$PartID)
# List unique tasks for this condition (as character, not factor)
list_tasks <- lapply(unique(auditory_data_frame$task), as.character)
# Separate SSL and TSL (kept as variables for downstream code)
ssl <- (auditory_data_frame[ which(auditory_data_frame$task=="SSL"),])
tsl <- (auditory_data_frame[ which(auditory_data_frame$task=="TSL"),])
# Every participant/task combination, built up front instead of growing
# vectors with append() inside nested loops. expand.grid varies its first
# argument fastest, so task cycles within each participant — the same row
# order as the original inner loop over tasks. (The unused `type` variable
# from the original has been dropped.)
combos <- expand.grid(
  task = unlist(list_tasks),
  part_id = list_part_id,
  stringsAsFactors = FALSE
)
part_id <- combos$part_id
task <- as.character(combos$task)
domain <- rep("auditory", nrow(combos))
# SSL is the speech (linguistic) task; anything else (TSL, tones) is
# non-linguistic. ifelse() guarantees modality always has one entry per row,
# where the original if-chain silently skipped unknown task codes and broke
# the data.frame() call below with unequal column lengths.
modality <- ifelse(task == "SSL", "linguistic", "non-linguistic")
# Mean accuracy per participant/task cell, rounded to 3 digits
accuracy <- mapply(
  function(id, t) {
    cell <- auditory_data_frame[ which(auditory_data_frame$PartID==id
                                       & auditory_data_frame$task== t), ]
    round(mean(cell$key_resp.corr), digits =3)
  },
  part_id,
  task,
  USE.NAMES = FALSE
)
# Combine data for each participant
auditory_accuracies <- data.frame(part_id, task, domain, modality, accuracy)
# ******************** III. FIND VISUAL ACCURACY *************************
# Visual: build a data frame with one row per participant x task, holding
# that cell's mean accuracy (mean of key_resp.corr, rounded to 3 digits)-----------------------------------------------------------------------------------------------------
# List unique participant IDs for this condition
list_part_id <- unique(visual_data_frame$PartID)
# List unique tasks for this condition (as character, not factor)
list_tasks <- lapply(unique(visual_data_frame$task), as.character)
# Separate lsl and vsl (kept as variables for downstream code)
lsl <- (visual_data_frame[ which(visual_data_frame$task=="lsl"),])
vsl <- (visual_data_frame[ which(visual_data_frame$task=="vsl"),])
# Every participant/task combination, built up front instead of growing
# vectors with append() inside nested loops (task varies fastest, matching
# the original inner-loop row order; the unused `type` variable is dropped).
combos <- expand.grid(
  task = unlist(list_tasks),
  part_id = list_part_id,
  stringsAsFactors = FALSE
)
part_id <- combos$part_id
task <- as.character(combos$task)
domain <- rep("visual", nrow(combos))
# BUG FIX: the original compared t == "LSL" / t == "VSL" (uppercase) while
# the task column holds lowercase codes (see the "lsl"/"vsl" subsets above),
# so neither branch ever fired, modality stayed NULL, and data.frame() below
# failed with unequal column lengths. Compare case-insensitively: LSL
# (letters) is linguistic, everything else (VSL, images) is non-linguistic.
modality <- ifelse(toupper(task) == "LSL", "linguistic", "non-linguistic")
# Mean accuracy per participant/task cell, rounded to 3 digits
accuracy <- mapply(
  function(id, t) {
    cell <- visual_data_frame[ which(visual_data_frame$PartID==id
                                     & visual_data_frame$task== t), ]
    round(mean(cell$key_resp.corr), digits =3)
  },
  part_id,
  task,
  USE.NAMES = FALSE
)
# Combine data for each participant
visual_accuracies <- data.frame(part_id, task, domain, modality, accuracy)