<!-- (GitHub page-scrape artifacts removed: navigation chrome and the line-number gutter that preceded the actual R Markdown content.) -->
---
title: "Temporal and sequential analysis for learning analytics"
output: html_notebook
author: Quan Nguyen, Postdoctoral Fellow, School of Information, University of Michigan
---
# Day 1: Sequence analysis using TraMineR - EDA
# Day 2: Sequence analysis using TraMineR - Matching, clustering
# Day 3: Association rule mining using the apriori algorithm
## Loading libraries
```{r}
# NOTE: attach order matters — dplyr is loaded after data.table, so dplyr's
# first()/last()/between() mask the data.table versions.
library(ggplot2)     # plotting
library(data.table)  # fast tables: setDT(), DT[i, j, by] syntax
library(TraMineR)    # sequence analysis (seqdef, seqiplot, ...)
library(dplyr)       # data-wrangling verbs (arrange, mutate, group_by)
library(tidyr)       # reshaping: spread(), fill()
```
```{r}
# TraMineR is already attached in the setup chunk above, so the duplicate
# library(TraMineR) call has been dropped.
# mvad: example dataset shipped with TraMineR (monthly school-to-work activity
# states in columns 17:86).
data(mvad)
seqstatl(mvad[, 17:86])  # list the distinct states observed in those columns
mvad.alphabet <- c("employment", "FE", "HE", "joblessness", "school",
"training")
mvad.labels <- c("employment", "further education", "higher education",
"joblessness", "school", "training")
mvad.scodes <- c("EM", "FE", "HE", "JL", "SC", "TR")
# Build the state-sequence object; xtstep = 6 labels every 6th tick on plots.
mvad.seq <- seqdef(mvad, 17:86, alphabet = mvad.alphabet, states = mvad.scodes,
labels = mvad.labels, xtstep = 6)
```
```{r}
# Peek at the raw data: first rows of the mvad data frame.
head(mvad)
```
```{r}
# Index plot of the 10 first sequences.
# Fix: the title string was missing its closing parenthesis.
# NOTE(review): `title` is deprecated in recent TraMineR in favour of `main`;
# kept here for compatibility with the rest of the notebook.
seqiplot(mvad.seq, with.legend = FALSE, title= "Index plot (10 first sequences)")
```
```{r}
# Full index plot (all sequences), sorted by the states at the beginning of
# the sequence ("from.start") so similar trajectories are drawn together.
seqIplot(mvad.seq, sortv = "from.start", with.legend = FALSE)
```
```{r}
# Sequence frequency plot: the most common sequences, with bar width
# proportional to their frequency.
# Fix: use FALSE instead of F (T/F are ordinary variables and can be
# reassigned, so spelling the logical out is safer).
seqfplot(mvad.seq, withlegend = FALSE, title = "Sequence frequency plot bar width proportional to the frequencies")
```
```{r}
# State distribution plot: the cross-sectional distribution of states at each
# time point.
# Fix: use FALSE instead of the reassignable shorthand F.
seqdplot(mvad.seq, withlegend = FALSE, title = "State distribution plot")
```
# Import log data
```{r}
# Read the raw clickstream export.
# Fix: the original called readr::read_csv(), but readr is never attached in
# this notebook; data.table::fread() (data.table is loaded above) reads the
# same CSV and the result is converted to a data.table below anyway.
lasi21_logdata <- fread("lasi21_logdata.csv")
# Re-label assignment- and exam-related pages as site_type "assignment"
# (grepl("ssignment") matches both "Assignment" and "assignment").
lasi21_logdata$site_type <- ifelse(grepl("ssignment", lasi21_logdata$instancename), "assignment",lasi21_logdata$site_type )
lasi21_logdata$site_type <- ifelse(grepl("exam", lasi21_logdata$instancename), "assignment",lasi21_logdata$site_type )
# Keep only the columns needed downstream.
lasi21_logdata <- as.data.table(lasi21_logdata[,c("id","date_time","spent_time","site_type","instancename")])
head(lasi21_logdata)
```
# Data pre-processing
```{r}
# Convert time spent from milliseconds to minutes (one decimal place).
lasi21_logdata$spent_time_m <- round(lasi21_logdata$spent_time/60000, digits=1)
# Flag session breaks: any event longer than 30 minutes ends a learning
# session. (Collapsed the original two-step init `x = 0; x <- ifelse(...)`
# into a single vectorized assignment — behavior is identical.)
lasi21_logdata$session_flag <- ifelse(lasi21_logdata$spent_time_m > 30, 1, 0)
# Filter out very short events (spent_time < 6 s, i.e. < 6000 ms).
lasi21_logdata2 <- lasi21_logdata[spent_time>=6000,]
# Number the sessions: the running count of break flags partitions the log
# into learning sessions.
lasi21_logdata2 <- lasi21_logdata2 %>%
arrange(id,date_time,spent_time) %>%
mutate(session_num = cumsum(session_flag))
# Remove the session-break rows themselves.
lasi21_logdata2 <- lasi21_logdata2[session_flag==0,]
# For each learning session, compute the cumulative time spent.
# Fix: added ungroup() so the grouped class does not leak into later chunks.
lasi21_logdata2 <- lasi21_logdata2 %>%
arrange(id,date_time,spent_time) %>%
group_by(session_num) %>%
mutate(spent_time_m_cum = cumsum(spent_time_m)) %>%
ungroup()
# Discretize into time units of 1/10 of a minute (6 s).
lasi21_logdata2$time_unit <- round(lasi21_logdata2$spent_time_m_cum*10,digits=0)
hist(lasi21_logdata2$spent_time_m_cum, main="Learning session length", xlab = "Minutes", breaks=200)
```
# Transform data into STS format
```{r}
# Expand a session's observed time units into the complete run 1..max(x1),
# so every 6-second slot of the session gets a row after the join below.
# Fix: removed the dead code in the original (`m1 <- max(c(length(x1)));
# length(x1) <- m1` was a no-op after `x1 <- 1:max(x1)`), and seq_len() is
# safe where 1:max would yield c(1, 0) for max == 0.
f1 <- function(x1){
list(time_unit = seq_len(max(x1)))
}
# Subset to the columns needed for the sequence construction.
lasi21_logdata3 <- lasi21_logdata2[,c("session_num","time_unit","site_type")]
# One row per (session, time unit): expand each session via f1().
test <- setDT(lasi21_logdata3)[, f1(time_unit), .(session_num)]
# Attach the observed site_type to the expanded grid.
test <- merge(test,lasi21_logdata3,by=c("session_num","time_unit"),all.x=TRUE)
# Fill the gaps upward: a time unit inherits the site_type of the next
# observed event in the session.
test <- test %>% fill(site_type,.direction = "up")
```
```{r}
# Sequence length distribution: for each session, the largest time unit is
# the session length measured in 6-second slots.
hist(lasi21_logdata3[,max(time_unit), by="session_num"]$V1, breaks=100, main="Length of learning session", xlab='Sequence length')
```
```{r}
# Empirical cumulative distribution of sequence lengths.
# Fix: corrected the "Cummulative" spelling in the plot title.
plot(ecdf(lasi21_logdata3[,max(time_unit), by="session_num"]$V1), main ='Cumulative distribution of seq length')
```
# Define sequences
```{r}
# Convert to STS (wide) format: one row per session, one column per time unit.
# (spread() is superseded by pivot_wider(), but it is kept here because it
# sorts the numeric time-unit columns ascending, which seqdef() relies on.)
log_sts <- spread(test, time_unit, site_type)
# Define the sequence object over every time-unit column after session_num.
# Fix: the original hard-coded 2:1200, which breaks whenever the longest
# session differs from 1199 time units; 2:ncol(log_sts) adapts to the data.
log_sts.seq <- seqdef(log_sts, 2:ncol(log_sts))
```
# EDA
```{r}
# Index plot of the sequences in rows 611:621 (note: that is 11 sequences,
# not 10 as the original comment said), truncated to the first 100 time units.
# layout(): plot 1 fills the left two thirds, the legend (plot 2) the right
# third. Fix: the original passed 9 values into a 1x3 matrix (R warns and
# uses only the first three), so the vector is trimmed to c(1, 1, 2).
layout(matrix(c(1, 1, 2), nrow = 1, ncol = 3, byrow = TRUE))
seqiplot(log_sts.seq[611:621,1:100], withlegend = FALSE, title = "Index plot (10 first sequences)")
seqlegend(log_sts.seq)
```
```{r}
# State distribution plot over the first 200 time units, legend in a
# separate panel.
# Fix: layout matrix trimmed to its effective first three values (the
# original supplied 9 values to a 1x3 matrix, triggering a warning).
layout(matrix(c(1, 1, 2), nrow = 1, ncol = 3, byrow = TRUE))
seqdplot(log_sts.seq[,1:200], title = "State distribution plot", withlegend = FALSE)
seqlegend(log_sts.seq)
```
```{r}
# Sequence frequency plot (bar width proportional to frequency, pbarw = TRUE)
# over the first 200 time units, legend in a separate panel.
# Fix: layout matrix trimmed to its effective first three values (the
# original supplied 9 values to a 1x3 matrix, triggering a warning).
layout(matrix(c(1, 1, 2), nrow = 1, ncol = 3, byrow = TRUE))
seqfplot(log_sts.seq[,1:200], title = "Sequence frequency plot", withlegend = FALSE, pbarw = TRUE)
seqlegend(log_sts.seq)
```
```{r}
# Stability within sequences.
# Shannon's entropy measures the diversity of states observed at each time
# point: it equals 0 when all cases are in the same state (the state of an
# individual is then easy to predict) and is maximal when the cases are
# spread evenly across the states.
seqHtplot(log_sts.seq[,1:200], title = "Entropy index")
```
```{r}
# Turbulence: a composite complexity measure per sequence (TraMineR seqST).
# The variable keeps its capitalized name because hist() uses the deparsed
# variable name as the default x-axis label.
Turbulence <- seqST(log_sts.seq[,1:200])
summary(Turbulence)
hist(Turbulence, col = "cyan", main = "Sequence turbulence",breaks=50)
```
```{r}
# Transitions of events.
# Convert state sequences into event sequences (one event per state change).
log_sts.seqe <- seqecreate(log_sts.seq[,1:200])
# Mine frequent subsequences with support >= 5% of sequences.
fsubseq <- seqefsub(log_sts.seqe, pMinSupport = 0.05)
# Plot the 15 most frequent subsequences.
# NOTE(review): this errors if fewer than 15 subsequences meet the support
# threshold — confirm on the real data.
plot(fsubseq[1:15], col = "cyan", main="Top 15 frequent subsequences")
```
Add a new chunk by clicking the *Insert Chunk* button on the toolbar or by pressing *Cmd+Option+I*.
When you save the notebook, an HTML file containing the code and output will be saved alongside it (click the *Preview* button or press *Cmd+Shift+K* to preview the HTML file).
The preview shows you a rendered HTML copy of the contents of the editor. Consequently, unlike *Knit*, *Preview* does not run any R code chunks. Instead, the output of the chunk when it was last run in the editor is displayed.