-
Notifications
You must be signed in to change notification settings - Fork 2
/
SensitivityAnalysis.R
383 lines (295 loc) · 16.1 KB
/
SensitivityAnalysis.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
"
@author: Zsofia Koma, UvA
Aim: Sensitivity analysis for resolution and number of selected training data
"

# Dependencies ----
library(randomForest)
library(caret)
library(rfUtilities)
library(ggplot2)
library(gridExtra)
library(ggrepel)
library(reshape2)
library(corrplot)

# Machine-specific path to the shared classification helper functions;
# adjust to where Function_Classification.R lives on your machine.
source("D:/Koma/GitHub/PhDPaper1_Classifying_wetland_habitats/Function_Classification.R")

# Grid resolution tag of the feature tables (part of the input/output file names).
res <- 10

# Set global variables ----
# Machine-specific working directory holding the exported feature tables.
setwd("D:/Sync/_Amsterdam/02_Paper1_ReedbedStructure_onlyALS/3_Dataprocessing/Paper1_revision/")

# Import ----
# One feature table per classification level (l1-l3) and per number of
# selected training points (100 / 500 / 1000).
featuretable_l1_100 <- read.csv(paste0("featuretable_l1_", res, "_100.csv"))
featuretable_l2_100 <- read.csv(paste0("featuretable_l2_", res, "_100.csv"))
featuretable_l3_100 <- read.csv(paste0("featuretable_l3_", res, "_100.csv"))
featuretable_l1_500 <- read.csv(paste0("featuretable_l1_", res, "_500.csv"))
featuretable_l2_500 <- read.csv(paste0("featuretable_l2_", res, "_500.csv"))
featuretable_l3_500 <- read.csv(paste0("featuretable_l3_", res, "_500.csv"))
featuretable_l1_1000 <- read.csv(paste0("featuretable_l1_", res, "_1000.csv"))
featuretable_l2_1000 <- read.csv(paste0("featuretable_l2_", res, "_1000.csv"))
featuretable_l3_1000 <- read.csv(paste0("featuretable_l3_", res, "_1000.csv"))
# Multi-run sensitivity analysis ----
#
# run_sensitivity() replaces nine near-identical copy-pasted sections
# (3 classification levels x 3 training-set sizes). For one feature table it
# repeats n_runs times:
#   stratified 75/25 train/test split -> random forest (ntree = 100) ->
#   confusion matrix on the held-out set,
# then writes the mean +- sd of the overall accuracy and kappa to
# "acc_<level>_multi_<res>_<npts>.txt" in the current working directory.
#
# Args:
#   featuretable: data.frame; columns 1:32 are the predictors, V3 the class label.
#   level:        level tag used in the output file name ("l1", "l2", "l3").
#   npts:         number of selected training points, used in the file name.
#   res:          resolution tag used in the file name.
#   n_runs:       number of repeated train/test splits (default 100).
#   first_seed:   seed of the first run; run i uses first_seed + i - 1, which
#                 reproduces the original script's seed sequence (5, 6, ..., 104).
#
# Returns (invisibly): data.frame with per-run TP/FN/FP/TN cells and derived
# user's/producer's accuracies. NOTE(review): TP/FN/FP/TN are the first four
# column-major cells of the confusion table, so these derived columns are only
# meaningful for the binary (l1) case — kept for parity with the original
# script; verify before using them for the multi-class l2/l3 runs.
run_sensitivity <- function(featuretable, level, npts, res,
                            n_runs = 100, first_seed = 5) {
  # Preallocate result holders instead of growing with c() inside the loop.
  accuracies <- numeric(n_runs)
  kappas <- numeric(n_runs)
  confm <- data.frame(TP = integer(n_runs), FN = integer(n_runs),
                      FP = integer(n_runs), TN = integer(n_runs))

  for (i in seq_len(n_runs)) {
    # One seed per run so every split and model fit is reproducible.
    set.seed(first_seed + i - 1)

    trainIndex <- caret::createDataPartition(y = featuretable$V3,
                                             p = 0.75, list = FALSE)
    trainingSet <- featuretable[trainIndex, ]
    testingSet <- featuretable[-trainIndex, ]

    modelFit <- randomForest(trainingSet[, 1:32], factor(trainingSet$V3),
                             ntree = 100, importance = TRUE)
    prediction <- predict(modelFit, testingSet[, 1:32])
    conf_m <- confusionMatrix(factor(prediction), factor(testingSet$V3),
                              mode = "everything")

    accuracies[i] <- conf_m$overall["Accuracy"]
    kappas[i] <- conf_m$overall["Kappa"]
    # First four column-major cells of the confusion table
    # (TP, FN, FP, TN for a 2x2 table), as in the original script.
    confm[i, c("TP", "FN", "FP", "TN")] <- conf_m$table[1:4]
  }

  # User's and producer's accuracy for the positive and negative class.
  confm$useracc_p <- confm$TP / (confm$TP + confm$FP)
  confm$prodacc_p <- confm$TP / (confm$TP + confm$FN)
  confm$useracc_n <- confm$TN / (confm$TN + confm$FN)
  confm$prodacc_n <- confm$TN / (confm$TN + confm$FP)

  # Write the summary statistics for this level/size combination to file.
  sink(paste0("acc_", level, "_multi_", res, "_", npts, ".txt"))
  print(paste("Multi run OA:", round(mean(accuracies), 3) * 100,
              "+-", round(sd(accuracies), 2) * 100))
  print(paste("Multi run Kappa:", round(mean(kappas), 3),
              "+-", round(sd(kappas), 2)))
  sink()

  invisible(confm)
}

# Run all nine level x training-set-size combinations.
run_sensitivity(featuretable_l1_100, "l1", 100, res)
run_sensitivity(featuretable_l1_500, "l1", 500, res)
run_sensitivity(featuretable_l1_1000, "l1", 1000, res)
run_sensitivity(featuretable_l2_100, "l2", 100, res)
run_sensitivity(featuretable_l2_500, "l2", 500, res)
run_sensitivity(featuretable_l2_1000, "l2", 1000, res)
run_sensitivity(featuretable_l3_100, "l3", 100, res)
run_sensitivity(featuretable_l3_500, "l3", 500, res)
run_sensitivity(featuretable_l3_1000, "l3", 1000, res)